From 547d70d06bf4bd3b349e2eadbff1535bc2c83169 Mon Sep 17 00:00:00 2001
From: Jeremy Hicks
Date: Wed, 14 Feb 2024 09:50:45 -0800
Subject: [PATCH] Indexer metrics ep2 for splunkenterprisereceiver (#30757)

**Description:** Adds additional indexer metrics to the Splunk Enterprise receiver, obtained from ad-hoc searches against Indexer or Cluster Manager instances. Disables by default the API endpoint searches that only return results for the specific instance whose API is being called. These can be re-enabled in the config if those metrics are specifically required (a config sketch follows below). Generated tests and docs for these additional metrics.
**Link to tracking Issue:** [30704](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30704)
**Testing:** Tests generated with mdatagen
**Documentation:** Documentation generated with mdatagen
--- .chloggen/indexer-metrics-2-splunkent.yaml | 27 + .../splunkenterprisereceiver/documentation.md | 307 +++- .../internal/metadata/generated_config.go | 80 +- .../metadata/generated_config_test.go | 36 + .../internal/metadata/generated_metrics.go | 1293 +++++++++++++++-- .../metadata/generated_metrics_test.go | 364 ++++- .../internal/metadata/testdata/config.yaml | 72 + .../splunkenterprisereceiver/metadata.yaml | 167 ++- receiver/splunkenterprisereceiver/scraper.go | 873 +++++++++++ .../splunkenterprisereceiver/search_result.go | 13 +- .../testdata/scraper/expected.yaml | 18 +- 11 files changed, 3063 insertions(+), 187 deletions(-) create mode 100755 .chloggen/indexer-metrics-2-splunkent.yaml
diff --git a/.chloggen/indexer-metrics-2-splunkent.yaml b/.chloggen/indexer-metrics-2-splunkent.yaml
new file mode 100755
index 000000000000..c7773843b473
--- /dev/null
+++ b/.chloggen/indexer-metrics-2-splunkent.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: breaking
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: splunkenterprisereceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: "adds additional metrics specific to indexers"
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [30704]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
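A sketch of the re-enable step mentioned in the description above. The per-metric `enabled` toggles follow the mdatagen config pattern shown in documentation.md below; the `splunkenterprise` receiver key and the exact nesting are assumptions for illustration, not part of this patch:

```yaml
receivers:
  splunkenterprise:
    metrics:
      # Per-instance metrics disabled by default by this change; re-enable
      # them only when this receiver's endpoint is a specific indexer.
      splunk.data.indexes.extended.event.count:
        enabled: true
      splunk.indexer.throughput:
        enabled: true
      splunk.server.introspection.queues.current:
        enabled: true
```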
+# Default: '[user]'
change_logs: [user]
diff --git a/receiver/splunkenterprisereceiver/documentation.md b/receiver/splunkenterprisereceiver/documentation.md
index 747375b33243..a9a58472ec41 100644
--- a/receiver/splunkenterprisereceiver/documentation.md
+++ b/receiver/splunkenterprisereceiver/documentation.md
@@ -12,13 +12,98 @@ metrics: enabled: false ```
-### splunk.data.indexes.extended.bucket.count
+### splunk.aggregation.queue.ratio

-Count of buckets per index
+Gauge tracking the average indexer aggregation queue ratio (%). **Note:** Search is best run against a Cluster Manager.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
-| {buckets} | Gauge | Int |
+| {%} | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.host | The name of the splunk host | Any Str |
+
+### splunk.buckets.searchable.status
+
+Gauge tracking the number of buckets and their searchable status. **Note:** Search is best run against a Cluster Manager.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {count} | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.host | The name of the splunk host | Any Str |
+| splunk.indexer.searchable | The searchability status reported for a specific object | Any Str |
+
+### splunk.indexer.avg.rate
+
+Gauge tracking the average rate of indexed data. **Note:** Search is best run against a Cluster Manager.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| KBy | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.host | The name of the splunk host | Any Str |
+
+### splunk.indexer.cpu.time
+
+Gauge tracking the number of indexing process cpu seconds per instance
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {s} | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.host | The name of the splunk host | Any Str |
+
+### splunk.indexer.queue.ratio
+
+Gauge tracking the average indexer index queue ratio (%). **Note:** Search is best run against a Cluster Manager.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {%} | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.host | The name of the splunk host | Any Str |
+
+### splunk.indexer.raw.write.time
+
+Gauge tracking the number of raw write seconds per instance
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {s} | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.host | The name of the splunk host | Any Str |
+
+### splunk.indexes.avg.size
+
+Gauge tracking the indexes and their average size (gb). **Note:** Search is best run against a Cluster Manager.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| Gb | Gauge | Double |

#### Attributes

@@ -26,13 +111,13 @@
| ---- | ----------- | ------ |
| splunk.index.name | The name of the index reporting a specific KPI | Any Str |

-### splunk.data.indexes.extended.event.count
+### splunk.indexes.avg.usage

-Count of events for index, excluding frozen events. Approximately equal to the event_count sum of all buckets.
+Gauge tracking the indexes and their average usage (%). **Note:** Search is best run against a Cluster Manager.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
-| {events} | Gauge | Int |
+| {%} | Gauge | Double |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| splunk.index.name | The name of the index reporting a specific KPI | Any Str |

-### splunk.data.indexes.extended.raw.size
+### splunk.indexes.bucket.count

-Size in bytes on disk of the /rawdata/ directories of all buckets in this index, excluding frozen
+Gauge tracking the indexes and their bucket counts. **Note:** Search is best run against a Cluster Manager.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
-| By | Gauge | Int |
+| {count} | Gauge | Int |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| splunk.index.name | The name of the index reporting a specific KPI | Any Str |

-### splunk.data.indexes.extended.total.size
+### splunk.indexes.median.data.age

-Size in bytes on disk of this index
+Gauge tracking the indexes and their median data age (days). **Note:** Search is best run against a Cluster Manager.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
-| By | Gauge | Int |
+| {days} | Gauge | Int |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
| splunk.index.name | The name of the index reporting a specific KPI | Any Str |

-### splunk.indexer.throughput
+### splunk.indexes.size

-Gauge tracking average bytes per second throughput of indexer
+Gauge tracking the indexes and their total size (gb). **Note:** Search is best run against a Cluster Manager.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
-| By/s | Gauge | Double |
+| Gb | Gauge | Double |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
-| splunk.indexer.status | The status message reported for a specific object | Any Str |
+| splunk.index.name | The name of the index reporting a specific KPI | Any Str |
+
+### splunk.io.avg.iops
+
+Gauge tracking the average IOPs used per instance
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {iops} | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.host | The name of the splunk host | Any Str |

### splunk.license.index.usage

@@ -96,33 +195,89 @@ Gauge tracking the indexed license usage per index
| ---- | ----------- | ------ |
| splunk.index.name | The name of the index reporting a specific KPI | Any Str |

-### splunk.server.introspection.queues.current
+### splunk.parse.queue.ratio

-Gauge tracking current length of queue
+Gauge tracking the average indexer parser queue ratio (%). **Note:** Search is best run against a Cluster Manager.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
-| {queues} | Gauge | Int |
+| {%} | Gauge | Double |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
-| splunk.queue.name | The name of the queue reporting a specific KPI | Any Str |
+| splunk.host | The name of the splunk host | Any Str |

-### splunk.server.introspection.queues.current.bytes
+### splunk.pipeline.set.count

-Gauge tracking current bytes waiting in queue
+Gauge tracking the number of pipeline sets per indexer. **Note:** Search is best run against a Cluster Manager.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
-| By | Gauge | Int |
+| KBy | Gauge | Int |

#### Attributes

| Name | Description | Values |
| ---- | ----------- | ------ |
-| splunk.queue.name | The name of the queue reporting a specific KPI | Any Str |
+| splunk.host | The name of the splunk host | Any Str |
+
+### splunk.scheduler.avg.execution.latency
+
+Gauge tracking the average execution latency of scheduled searches
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {ms} | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.host | The name of the splunk host | Any Str |
+
+### splunk.scheduler.avg.run.time
+
+Gauge tracking the average runtime of scheduled searches
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {ms} | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.host | The name of the splunk host | Any Str |
+
+### splunk.scheduler.completion.ratio
+
+Gauge tracking the ratio of completed to skipped scheduled searches
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {%} | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.host | The name of the splunk host | Any Str |
+
+### splunk.typing.queue.ratio
+
+Gauge tracking the average indexer typing queue ratio (%). **Note:** Search is best run against a Cluster Manager.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {%} | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.host | The name of the splunk host | Any Str |

## Optional Metrics

@@ -134,9 +289,23 @@ metrics: enabled: true ```

+### splunk.data.indexes.extended.bucket.count
+
+Count of buckets per index
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {buckets} | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.index.name | The name of the index reporting a specific KPI | Any Str |
+
### splunk.data.indexes.extended.bucket.event.count

-Count of events in this bucket super-directory
+Count of events in this bucket super-directory. **Note:** Must be pointed at a specific indexer `endpoint`.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
@@ -151,7 +320,7 @@ Count of events in this bucket super-directory

### splunk.data.indexes.extended.bucket.hot.count

-(If size > 0) Number of hot buckets
+(If size > 0) Number of hot buckets. **Note:** Must be pointed at a specific indexer `endpoint`.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
@@ -166,7 +335,7 @@ Count of events in this bucket super-directory

### splunk.data.indexes.extended.bucket.warm.count

-(If size > 0) Number of warm buckets
+(If size > 0) Number of warm buckets. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
@@ -178,3 +347,87 @@
| ---- | ----------- | ------ |
| splunk.index.name | The name of the index reporting a specific KPI | Any Str |
| splunk.bucket.dir | The bucket super-directory (home, cold, thawed) for each index | Any Str |
+
+### splunk.data.indexes.extended.event.count
+
+Count of events for index, excluding frozen events. Approximately equal to the event_count sum of all buckets. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {events} | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.index.name | The name of the index reporting a specific KPI | Any Str |
+
+### splunk.data.indexes.extended.raw.size
+
+Size in bytes on disk of the /rawdata/ directories of all buckets in this index, excluding frozen. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.index.name | The name of the index reporting a specific KPI | Any Str |
+
+### splunk.data.indexes.extended.total.size
+
+Size in bytes on disk of this index. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.index.name | The name of the index reporting a specific KPI | Any Str |
+
+### splunk.indexer.throughput
+
+Gauge tracking average bytes per second throughput of indexer. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By/s | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.indexer.status | The status message reported for a specific object | Any Str |
+
+### splunk.server.introspection.queues.current
+
+Gauge tracking current length of queue. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {queues} | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| splunk.queue.name | The name of the queue reporting a specific KPI | Any Str |
+
+### splunk.server.introspection.queues.current.bytes
+
+Gauge tracking current bytes waiting in queue. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| splunk.queue.name | The name of the queue reporting a specific KPI | Any Str | diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go index 1320bc07c1d7..5b80b4fb6dcc 100644 --- a/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go @@ -25,6 +25,8 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { // MetricsConfig provides config for splunkenterprise metrics. type MetricsConfig struct { + SplunkAggregationQueueRatio MetricConfig `mapstructure:"splunk.aggregation.queue.ratio"` + SplunkBucketsSearchableStatus MetricConfig `mapstructure:"splunk.buckets.searchable.status"` SplunkDataIndexesExtendedBucketCount MetricConfig `mapstructure:"splunk.data.indexes.extended.bucket.count"` SplunkDataIndexesExtendedBucketEventCount MetricConfig `mapstructure:"splunk.data.indexes.extended.bucket.event.count"` SplunkDataIndexesExtendedBucketHotCount MetricConfig `mapstructure:"splunk.data.indexes.extended.bucket.hot.count"` @@ -32,17 +34,39 @@ type MetricsConfig struct { SplunkDataIndexesExtendedEventCount MetricConfig `mapstructure:"splunk.data.indexes.extended.event.count"` SplunkDataIndexesExtendedRawSize MetricConfig `mapstructure:"splunk.data.indexes.extended.raw.size"` SplunkDataIndexesExtendedTotalSize MetricConfig `mapstructure:"splunk.data.indexes.extended.total.size"` + SplunkIndexerAvgRate MetricConfig `mapstructure:"splunk.indexer.avg.rate"` + SplunkIndexerCPUTime MetricConfig `mapstructure:"splunk.indexer.cpu.time"` + SplunkIndexerQueueRatio MetricConfig `mapstructure:"splunk.indexer.queue.ratio"` + SplunkIndexerRawWriteTime MetricConfig `mapstructure:"splunk.indexer.raw.write.time"` SplunkIndexerThroughput MetricConfig `mapstructure:"splunk.indexer.throughput"` + SplunkIndexesAvgSize MetricConfig `mapstructure:"splunk.indexes.avg.size"` + SplunkIndexesAvgUsage MetricConfig `mapstructure:"splunk.indexes.avg.usage"` + SplunkIndexesBucketCount MetricConfig `mapstructure:"splunk.indexes.bucket.count"` + SplunkIndexesMedianDataAge MetricConfig `mapstructure:"splunk.indexes.median.data.age"` + SplunkIndexesSize MetricConfig `mapstructure:"splunk.indexes.size"` + SplunkIoAvgIops MetricConfig `mapstructure:"splunk.io.avg.iops"` SplunkLicenseIndexUsage MetricConfig `mapstructure:"splunk.license.index.usage"` + SplunkParseQueueRatio MetricConfig `mapstructure:"splunk.parse.queue.ratio"` + SplunkPipelineSetCount MetricConfig `mapstructure:"splunk.pipeline.set.count"` + SplunkSchedulerAvgExecutionLatency MetricConfig `mapstructure:"splunk.scheduler.avg.execution.latency"` + SplunkSchedulerAvgRunTime MetricConfig `mapstructure:"splunk.scheduler.avg.run.time"` + SplunkSchedulerCompletionRatio MetricConfig `mapstructure:"splunk.scheduler.completion.ratio"` SplunkServerIntrospectionQueuesCurrent MetricConfig `mapstructure:"splunk.server.introspection.queues.current"` SplunkServerIntrospectionQueuesCurrentBytes MetricConfig `mapstructure:"splunk.server.introspection.queues.current.bytes"` + SplunkTypingQueueRatio MetricConfig `mapstructure:"splunk.typing.queue.ratio"` } func DefaultMetricsConfig() MetricsConfig { return MetricsConfig{ - SplunkDataIndexesExtendedBucketCount: MetricConfig{ + 
SplunkAggregationQueueRatio: MetricConfig{ + Enabled: true, + }, + SplunkBucketsSearchableStatus: MetricConfig{ Enabled: true, }, + SplunkDataIndexesExtendedBucketCount: MetricConfig{ + Enabled: false, + }, SplunkDataIndexesExtendedBucketEventCount: MetricConfig{ Enabled: false, }, @@ -53,24 +77,72 @@ func DefaultMetricsConfig() MetricsConfig { Enabled: false, }, SplunkDataIndexesExtendedEventCount: MetricConfig{ - Enabled: true, + Enabled: false, }, SplunkDataIndexesExtendedRawSize: MetricConfig{ - Enabled: true, + Enabled: false, }, SplunkDataIndexesExtendedTotalSize: MetricConfig{ + Enabled: false, + }, + SplunkIndexerAvgRate: MetricConfig{ + Enabled: true, + }, + SplunkIndexerCPUTime: MetricConfig{ + Enabled: true, + }, + SplunkIndexerQueueRatio: MetricConfig{ + Enabled: true, + }, + SplunkIndexerRawWriteTime: MetricConfig{ Enabled: true, }, SplunkIndexerThroughput: MetricConfig{ + Enabled: false, + }, + SplunkIndexesAvgSize: MetricConfig{ + Enabled: true, + }, + SplunkIndexesAvgUsage: MetricConfig{ + Enabled: true, + }, + SplunkIndexesBucketCount: MetricConfig{ + Enabled: true, + }, + SplunkIndexesMedianDataAge: MetricConfig{ + Enabled: true, + }, + SplunkIndexesSize: MetricConfig{ + Enabled: true, + }, + SplunkIoAvgIops: MetricConfig{ Enabled: true, }, SplunkLicenseIndexUsage: MetricConfig{ Enabled: true, }, - SplunkServerIntrospectionQueuesCurrent: MetricConfig{ + SplunkParseQueueRatio: MetricConfig{ + Enabled: true, + }, + SplunkPipelineSetCount: MetricConfig{ Enabled: true, }, + SplunkSchedulerAvgExecutionLatency: MetricConfig{ + Enabled: true, + }, + SplunkSchedulerAvgRunTime: MetricConfig{ + Enabled: true, + }, + SplunkSchedulerCompletionRatio: MetricConfig{ + Enabled: true, + }, + SplunkServerIntrospectionQueuesCurrent: MetricConfig{ + Enabled: false, + }, SplunkServerIntrospectionQueuesCurrentBytes: MetricConfig{ + Enabled: false, + }, + SplunkTypingQueueRatio: MetricConfig{ Enabled: true, }, } diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_config_test.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_config_test.go index ffad681bdc64..77378ff1aaec 100644 --- a/receiver/splunkenterprisereceiver/internal/metadata/generated_config_test.go +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_config_test.go @@ -26,6 +26,8 @@ func TestMetricsBuilderConfig(t *testing.T) { name: "all_set", want: MetricsBuilderConfig{ Metrics: MetricsConfig{ + SplunkAggregationQueueRatio: MetricConfig{Enabled: true}, + SplunkBucketsSearchableStatus: MetricConfig{Enabled: true}, SplunkDataIndexesExtendedBucketCount: MetricConfig{Enabled: true}, SplunkDataIndexesExtendedBucketEventCount: MetricConfig{Enabled: true}, SplunkDataIndexesExtendedBucketHotCount: MetricConfig{Enabled: true}, @@ -33,10 +35,26 @@ func TestMetricsBuilderConfig(t *testing.T) { SplunkDataIndexesExtendedEventCount: MetricConfig{Enabled: true}, SplunkDataIndexesExtendedRawSize: MetricConfig{Enabled: true}, SplunkDataIndexesExtendedTotalSize: MetricConfig{Enabled: true}, + SplunkIndexerAvgRate: MetricConfig{Enabled: true}, + SplunkIndexerCPUTime: MetricConfig{Enabled: true}, + SplunkIndexerQueueRatio: MetricConfig{Enabled: true}, + SplunkIndexerRawWriteTime: MetricConfig{Enabled: true}, SplunkIndexerThroughput: MetricConfig{Enabled: true}, + SplunkIndexesAvgSize: MetricConfig{Enabled: true}, + SplunkIndexesAvgUsage: MetricConfig{Enabled: true}, + SplunkIndexesBucketCount: MetricConfig{Enabled: true}, + SplunkIndexesMedianDataAge: MetricConfig{Enabled: true}, + 
SplunkIndexesSize: MetricConfig{Enabled: true},
+ SplunkIoAvgIops: MetricConfig{Enabled: true},
SplunkLicenseIndexUsage: MetricConfig{Enabled: true},
+ SplunkParseQueueRatio: MetricConfig{Enabled: true},
+ SplunkPipelineSetCount: MetricConfig{Enabled: true},
+ SplunkSchedulerAvgExecutionLatency: MetricConfig{Enabled: true},
+ SplunkSchedulerAvgRunTime: MetricConfig{Enabled: true},
+ SplunkSchedulerCompletionRatio: MetricConfig{Enabled: true},
SplunkServerIntrospectionQueuesCurrent: MetricConfig{Enabled: true},
SplunkServerIntrospectionQueuesCurrentBytes: MetricConfig{Enabled: true},
+ SplunkTypingQueueRatio: MetricConfig{Enabled: true},
},
},
},
{
name: "none_set",
want: MetricsBuilderConfig{
Metrics: MetricsConfig{
+ SplunkAggregationQueueRatio: MetricConfig{Enabled: false},
+ SplunkBucketsSearchableStatus: MetricConfig{Enabled: false},
SplunkDataIndexesExtendedBucketCount: MetricConfig{Enabled: false},
SplunkDataIndexesExtendedBucketEventCount: MetricConfig{Enabled: false},
SplunkDataIndexesExtendedBucketHotCount: MetricConfig{Enabled: false},
@@ -51,10 +71,26 @@ func TestMetricsBuilderConfig(t *testing.T) {
SplunkDataIndexesExtendedEventCount: MetricConfig{Enabled: false},
SplunkDataIndexesExtendedRawSize: MetricConfig{Enabled: false},
SplunkDataIndexesExtendedTotalSize: MetricConfig{Enabled: false},
+ SplunkIndexerAvgRate: MetricConfig{Enabled: false},
+ SplunkIndexerCPUTime: MetricConfig{Enabled: false},
+ SplunkIndexerQueueRatio: MetricConfig{Enabled: false},
+ SplunkIndexerRawWriteTime: MetricConfig{Enabled: false},
SplunkIndexerThroughput: MetricConfig{Enabled: false},
+ SplunkIndexesAvgSize: MetricConfig{Enabled: false},
+ SplunkIndexesAvgUsage: MetricConfig{Enabled: false},
+ SplunkIndexesBucketCount: MetricConfig{Enabled: false},
+ SplunkIndexesMedianDataAge: MetricConfig{Enabled: false},
+ SplunkIndexesSize: MetricConfig{Enabled: false},
+ SplunkIoAvgIops: MetricConfig{Enabled: false},
SplunkLicenseIndexUsage: MetricConfig{Enabled: false},
+ SplunkParseQueueRatio: MetricConfig{Enabled: false},
+ SplunkPipelineSetCount: MetricConfig{Enabled: false},
+ SplunkSchedulerAvgExecutionLatency: MetricConfig{Enabled: false},
+ SplunkSchedulerAvgRunTime: MetricConfig{Enabled: false},
+ SplunkSchedulerCompletionRatio: MetricConfig{Enabled: false},
SplunkServerIntrospectionQueuesCurrent: MetricConfig{Enabled: false},
SplunkServerIntrospectionQueuesCurrentBytes: MetricConfig{Enabled: false},
+ SplunkTypingQueueRatio: MetricConfig{Enabled: false},
},
},
},
diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics.go
index 73c9a0805c99..a5992d7110a1 100644
--- a/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics.go
+++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics.go
@@ -11,22 +11,842 @@ import (
"go.opentelemetry.io/collector/receiver"
)
+type metricSplunkAggregationQueueRatio struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills splunk.aggregation.queue.ratio metric with initial data.
+func (m *metricSplunkAggregationQueueRatio) init() {
+ m.data.SetName("splunk.aggregation.queue.ratio")
+ m.data.SetDescription("Gauge tracking the average indexer aggregation queue ratio (%). **Note:** Search is best run against a Cluster Manager.")
+ m.data.SetUnit("{%}")
+ m.data.SetEmptyGauge()
+ m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricSplunkAggregationQueueRatio) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetDoubleValue(val)
+ dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricSplunkAggregationQueueRatio) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricSplunkAggregationQueueRatio) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricSplunkAggregationQueueRatio(cfg MetricConfig) metricSplunkAggregationQueueRatio {
+ m := metricSplunkAggregationQueueRatio{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricSplunkBucketsSearchableStatus struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills splunk.buckets.searchable.status metric with initial data.
+func (m *metricSplunkBucketsSearchableStatus) init() {
+ m.data.SetName("splunk.buckets.searchable.status")
+ m.data.SetDescription("Gauge tracking the number of buckets and their searchable status. **Note:** Search is best run against a Cluster Manager.")
+ m.data.SetUnit("{count}")
+ m.data.SetEmptyGauge()
+ m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricSplunkBucketsSearchableStatus) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkHostAttributeValue string, splunkIndexerSearchableAttributeValue string) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+ dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue)
+ dp.Attributes().PutStr("splunk.indexer.searchable", splunkIndexerSearchableAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricSplunkBucketsSearchableStatus) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricSplunkBucketsSearchableStatus) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkBucketsSearchableStatus(cfg MetricConfig) metricSplunkBucketsSearchableStatus { + m := metricSplunkBucketsSearchableStatus{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricSplunkDataIndexesExtendedBucketCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills splunk.data.indexes.extended.bucket.count metric with initial data. -func (m *metricSplunkDataIndexesExtendedBucketCount) init() { - m.data.SetName("splunk.data.indexes.extended.bucket.count") - m.data.SetDescription("Count of buckets per index") - m.data.SetUnit("{buckets}") +// init fills splunk.data.indexes.extended.bucket.count metric with initial data. +func (m *metricSplunkDataIndexesExtendedBucketCount) init() { + m.data.SetName("splunk.data.indexes.extended.bucket.count") + m.data.SetDescription("Count of buckets per index") + m.data.SetUnit("{buckets}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkDataIndexesExtendedBucketCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkDataIndexesExtendedBucketCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSplunkDataIndexesExtendedBucketCount) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkDataIndexesExtendedBucketCount(cfg MetricConfig) metricSplunkDataIndexesExtendedBucketCount { + m := metricSplunkDataIndexesExtendedBucketCount{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSplunkDataIndexesExtendedBucketEventCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.data.indexes.extended.bucket.event.count metric with initial data. +func (m *metricSplunkDataIndexesExtendedBucketEventCount) init() { + m.data.SetName("splunk.data.indexes.extended.bucket.event.count") + m.data.SetDescription("Count of events in this bucket super-directory. 
**Note:** Must be pointed at a specific indexer `endpoint`.")
+ m.data.SetUnit("{events}")
+ m.data.SetEmptyGauge()
+ m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricSplunkDataIndexesExtendedBucketEventCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string, splunkBucketDirAttributeValue string) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+ dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue)
+ dp.Attributes().PutStr("splunk.bucket.dir", splunkBucketDirAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricSplunkDataIndexesExtendedBucketEventCount) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricSplunkDataIndexesExtendedBucketEventCount) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricSplunkDataIndexesExtendedBucketEventCount(cfg MetricConfig) metricSplunkDataIndexesExtendedBucketEventCount {
+ m := metricSplunkDataIndexesExtendedBucketEventCount{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricSplunkDataIndexesExtendedBucketHotCount struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills splunk.data.indexes.extended.bucket.hot.count metric with initial data.
+func (m *metricSplunkDataIndexesExtendedBucketHotCount) init() {
+ m.data.SetName("splunk.data.indexes.extended.bucket.hot.count")
+ m.data.SetDescription("(If size > 0) Number of hot buckets. **Note:** Must be pointed at a specific indexer `endpoint`.")
+ m.data.SetUnit("{buckets}")
+ m.data.SetEmptyGauge()
+ m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricSplunkDataIndexesExtendedBucketHotCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string, splunkBucketDirAttributeValue string) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+ dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue)
+ dp.Attributes().PutStr("splunk.bucket.dir", splunkBucketDirAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricSplunkDataIndexesExtendedBucketHotCount) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricSplunkDataIndexesExtendedBucketHotCount) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricSplunkDataIndexesExtendedBucketHotCount(cfg MetricConfig) metricSplunkDataIndexesExtendedBucketHotCount {
+ m := metricSplunkDataIndexesExtendedBucketHotCount{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricSplunkDataIndexesExtendedBucketWarmCount struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills splunk.data.indexes.extended.bucket.warm.count metric with initial data.
+func (m *metricSplunkDataIndexesExtendedBucketWarmCount) init() {
+ m.data.SetName("splunk.data.indexes.extended.bucket.warm.count")
+ m.data.SetDescription("(If size > 0) Number of warm buckets. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.")
+ m.data.SetUnit("{buckets}")
+ m.data.SetEmptyGauge()
+ m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricSplunkDataIndexesExtendedBucketWarmCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string, splunkBucketDirAttributeValue string) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+ dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue)
+ dp.Attributes().PutStr("splunk.bucket.dir", splunkBucketDirAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricSplunkDataIndexesExtendedBucketWarmCount) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricSplunkDataIndexesExtendedBucketWarmCount) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricSplunkDataIndexesExtendedBucketWarmCount(cfg MetricConfig) metricSplunkDataIndexesExtendedBucketWarmCount {
+ m := metricSplunkDataIndexesExtendedBucketWarmCount{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricSplunkDataIndexesExtendedEventCount struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills splunk.data.indexes.extended.event.count metric with initial data.
+func (m *metricSplunkDataIndexesExtendedEventCount) init() {
+ m.data.SetName("splunk.data.indexes.extended.event.count")
+ m.data.SetDescription("Count of events for index, excluding frozen events. Approximately equal to the event_count sum of all buckets. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.")
+ m.data.SetUnit("{events}")
+ m.data.SetEmptyGauge()
+ m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricSplunkDataIndexesExtendedEventCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+ dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricSplunkDataIndexesExtendedEventCount) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricSplunkDataIndexesExtendedEventCount) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricSplunkDataIndexesExtendedEventCount(cfg MetricConfig) metricSplunkDataIndexesExtendedEventCount {
+ m := metricSplunkDataIndexesExtendedEventCount{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricSplunkDataIndexesExtendedRawSize struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills splunk.data.indexes.extended.raw.size metric with initial data.
+func (m *metricSplunkDataIndexesExtendedRawSize) init() {
+ m.data.SetName("splunk.data.indexes.extended.raw.size")
+ m.data.SetDescription("Size in bytes on disk of the /rawdata/ directories of all buckets in this index, excluding frozen. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.")
+ m.data.SetUnit("By")
+ m.data.SetEmptyGauge()
+ m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricSplunkDataIndexesExtendedRawSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+ dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricSplunkDataIndexesExtendedRawSize) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricSplunkDataIndexesExtendedRawSize) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricSplunkDataIndexesExtendedRawSize(cfg MetricConfig) metricSplunkDataIndexesExtendedRawSize {
+ m := metricSplunkDataIndexesExtendedRawSize{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricSplunkDataIndexesExtendedTotalSize struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills splunk.data.indexes.extended.total.size metric with initial data.
+func (m *metricSplunkDataIndexesExtendedTotalSize) init() {
+ m.data.SetName("splunk.data.indexes.extended.total.size")
+ m.data.SetDescription("Size in bytes on disk of this index. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.")
+ m.data.SetUnit("By")
+ m.data.SetEmptyGauge()
+ m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricSplunkDataIndexesExtendedTotalSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) {
+ if !m.config.Enabled {
+ return
+ }
+ dp := m.data.Gauge().DataPoints().AppendEmpty()
+ dp.SetStartTimestamp(start)
+ dp.SetTimestamp(ts)
+ dp.SetIntValue(val)
+ dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricSplunkDataIndexesExtendedTotalSize) updateCapacity() {
+ if m.data.Gauge().DataPoints().Len() > m.capacity {
+ m.capacity = m.data.Gauge().DataPoints().Len()
+ }
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricSplunkDataIndexesExtendedTotalSize) emit(metrics pmetric.MetricSlice) {
+ if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+ m.updateCapacity()
+ m.data.MoveTo(metrics.AppendEmpty())
+ m.init()
+ }
+}
+
+func newMetricSplunkDataIndexesExtendedTotalSize(cfg MetricConfig) metricSplunkDataIndexesExtendedTotalSize {
+ m := metricSplunkDataIndexesExtendedTotalSize{config: cfg}
+ if cfg.Enabled {
+ m.data = pmetric.NewMetric()
+ m.init()
+ }
+ return m
+}
+
+type metricSplunkIndexerAvgRate struct {
+ data pmetric.Metric // data buffer for generated metric.
+ config MetricConfig // metric config provided by user.
+ capacity int // max observed number of data points added to the metric.
+}
+
+// init fills splunk.indexer.avg.rate metric with initial data.
+func (m *metricSplunkIndexerAvgRate) init() {
+ m.data.SetName("splunk.indexer.avg.rate")
+ m.data.SetDescription("Gauge tracking the average rate of indexed data. 
**Note:** Search is best run against a Cluster Manager.") + m.data.SetUnit("KBy") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkIndexerAvgRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkIndexerAvgRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSplunkIndexerAvgRate) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkIndexerAvgRate(cfg MetricConfig) metricSplunkIndexerAvgRate { + m := metricSplunkIndexerAvgRate{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSplunkIndexerCPUTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.indexer.cpu.time metric with initial data. +func (m *metricSplunkIndexerCPUTime) init() { + m.data.SetName("splunk.indexer.cpu.time") + m.data.SetDescription("Gauge tracking the number of indexing process cpu seconds per instance") + m.data.SetUnit("{s}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkIndexerCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkIndexerCPUTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSplunkIndexerCPUTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkIndexerCPUTime(cfg MetricConfig) metricSplunkIndexerCPUTime { + m := metricSplunkIndexerCPUTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSplunkIndexerQueueRatio struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.indexer.queue.ratio metric with initial data. 
+func (m *metricSplunkIndexerQueueRatio) init() { + m.data.SetName("splunk.indexer.queue.ratio") + m.data.SetDescription("Gauge tracking the average indexer index queue ration (%). *Note:** Search is best run against a Cluster Manager.") + m.data.SetUnit("{%}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkIndexerQueueRatio) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkIndexerQueueRatio) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSplunkIndexerQueueRatio) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkIndexerQueueRatio(cfg MetricConfig) metricSplunkIndexerQueueRatio { + m := metricSplunkIndexerQueueRatio{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSplunkIndexerRawWriteTime struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.indexer.raw.write.time metric with initial data. +func (m *metricSplunkIndexerRawWriteTime) init() { + m.data.SetName("splunk.indexer.raw.write.time") + m.data.SetDescription("Gauge tracking the number of raw write seconds per instance") + m.data.SetUnit("{s}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkIndexerRawWriteTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkIndexerRawWriteTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSplunkIndexerRawWriteTime) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkIndexerRawWriteTime(cfg MetricConfig) metricSplunkIndexerRawWriteTime { + m := metricSplunkIndexerRawWriteTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSplunkIndexerThroughput struct { + data pmetric.Metric // data buffer for generated metric. 
+ config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.indexer.throughput metric with initial data. +func (m *metricSplunkIndexerThroughput) init() { + m.data.SetName("splunk.indexer.throughput") + m.data.SetDescription("Gauge tracking average bytes per second throughput of indexer. *Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.") + m.data.SetUnit("By/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkIndexerThroughput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkIndexerStatusAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("splunk.indexer.status", splunkIndexerStatusAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkIndexerThroughput) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSplunkIndexerThroughput) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkIndexerThroughput(cfg MetricConfig) metricSplunkIndexerThroughput { + m := metricSplunkIndexerThroughput{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSplunkIndexesAvgSize struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.indexes.avg.size metric with initial data. +func (m *metricSplunkIndexesAvgSize) init() { + m.data.SetName("splunk.indexes.avg.size") + m.data.SetDescription("Gauge tracking the indexes and their average size (gb). *Note:** Search is best run against a Cluster Manager.") + m.data.SetUnit("Gb") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkIndexesAvgSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkIndexNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkIndexesAvgSize) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricSplunkIndexesAvgSize) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkIndexesAvgSize(cfg MetricConfig) metricSplunkIndexesAvgSize { + m := metricSplunkIndexesAvgSize{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSplunkIndexesAvgUsage struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.indexes.avg.usage metric with initial data. +func (m *metricSplunkIndexesAvgUsage) init() { + m.data.SetName("splunk.indexes.avg.usage") + m.data.SetDescription("Gauge tracking the indexes and their average usage (%). *Note:** Search is best run against a Cluster Manager.") + m.data.SetUnit("{%}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkIndexesAvgUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkIndexNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkIndexesAvgUsage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSplunkIndexesAvgUsage) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkIndexesAvgUsage(cfg MetricConfig) metricSplunkIndexesAvgUsage { + m := metricSplunkIndexesAvgUsage{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSplunkIndexesBucketCount struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.indexes.bucket.count metric with initial data. +func (m *metricSplunkIndexesBucketCount) init() { + m.data.SetName("splunk.indexes.bucket.count") + m.data.SetDescription("Gauge tracking the indexes and their bucket counts. *Note:** Search is best run against a Cluster Manager.") + m.data.SetUnit("{count}") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSplunkDataIndexesExtendedBucketCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) { +func (m *metricSplunkIndexesBucketCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) { if !m.config.Enabled { return } @@ -38,14 +858,14 @@ func (m *metricSplunkDataIndexesExtendedBucketCount) recordDataPoint(start pcomm } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
-func (m *metricSplunkDataIndexesExtendedBucketCount) updateCapacity() { +func (m *metricSplunkIndexesBucketCount) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSplunkDataIndexesExtendedBucketCount) emit(metrics pmetric.MetricSlice) { +func (m *metricSplunkIndexesBucketCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -53,8 +873,8 @@ func (m *metricSplunkDataIndexesExtendedBucketCount) emit(metrics pmetric.Metric } } -func newMetricSplunkDataIndexesExtendedBucketCount(cfg MetricConfig) metricSplunkDataIndexesExtendedBucketCount { - m := metricSplunkDataIndexesExtendedBucketCount{config: cfg} +func newMetricSplunkIndexesBucketCount(cfg MetricConfig) metricSplunkIndexesBucketCount { + m := metricSplunkIndexesBucketCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -62,22 +882,22 @@ func newMetricSplunkDataIndexesExtendedBucketCount(cfg MetricConfig) metricSplun return m } -type metricSplunkDataIndexesExtendedBucketEventCount struct { +type metricSplunkIndexesMedianDataAge struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills splunk.data.indexes.extended.bucket.event.count metric with initial data. -func (m *metricSplunkDataIndexesExtendedBucketEventCount) init() { - m.data.SetName("splunk.data.indexes.extended.bucket.event.count") - m.data.SetDescription("Count of events in this bucket super-directory") - m.data.SetUnit("{events}") +// init fills splunk.indexes.median.data.age metric with initial data. +func (m *metricSplunkIndexesMedianDataAge) init() { + m.data.SetName("splunk.indexes.median.data.age") + m.data.SetDescription("Gauge tracking the indexes and their median data age (days). **Note:** Search is best run against a Cluster Manager.") + m.data.SetUnit("{days}") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSplunkDataIndexesExtendedBucketEventCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string, splunkBucketDirAttributeValue string) { +func (m *metricSplunkIndexesMedianDataAge) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) { if !m.config.Enabled { return } @@ -86,18 +906,17 @@ func (m *metricSplunkDataIndexesExtendedBucketEventCount) recordDataPoint(start dp.SetTimestamp(ts) dp.SetIntValue(val) dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue) - dp.Attributes().PutStr("splunk.bucket.dir", splunkBucketDirAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity.
-func (m *metricSplunkDataIndexesExtendedBucketEventCount) emit(metrics pmetric.M +func (m *metricSplunkIndexesMedianDataAge) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -105,8 +924,8 @@ func (m *metricSplunkDataIndexesExtendedBucketEventCount) emit(metrics pmetric.M } } -func newMetricSplunkDataIndexesExtendedBucketEventCount(cfg MetricConfig) metricSplunkDataIndexesExtendedBucketEventCount { - m := metricSplunkDataIndexesExtendedBucketEventCount{config: cfg} +func newMetricSplunkIndexesMedianDataAge(cfg MetricConfig) metricSplunkIndexesMedianDataAge { + m := metricSplunkIndexesMedianDataAge{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -114,42 +933,41 @@ func newMetricSplunkDataIndexesExtendedBucketEventCount(cfg MetricConfig) metric return m } -type metricSplunkDataIndexesExtendedBucketHotCount struct { +type metricSplunkIndexesSize struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills splunk.data.indexes.extended.bucket.hot.count metric with initial data. -func (m *metricSplunkDataIndexesExtendedBucketHotCount) init() { - m.data.SetName("splunk.data.indexes.extended.bucket.hot.count") - m.data.SetDescription("(If size > 0) Number of hot buckets") - m.data.SetUnit("{buckets}") +// init fills splunk.indexes.size metric with initial data. +func (m *metricSplunkIndexesSize) init() { + m.data.SetName("splunk.indexes.size") + m.data.SetDescription("Gauge tracking the indexes and their total size (gb). **Note:** Search is best run against a Cluster Manager.") + m.data.SetUnit("Gb") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSplunkDataIndexesExtendedBucketHotCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string, splunkBucketDirAttributeValue string) { +func (m *metricSplunkIndexesSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkIndexNameAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetIntValue(val) + dp.SetDoubleValue(val) dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue) - dp.Attributes().PutStr("splunk.bucket.dir", splunkBucketDirAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricSplunkDataIndexesExtendedBucketHotCount) updateCapacity() { +func (m *metricSplunkIndexesSize) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSplunkDataIndexesExtendedBucketHotCount) emit(metrics pmetric.MetricSlice) { +func (m *metricSplunkIndexesSize) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -157,8 +975,8 @@ func (m *metricSplunkDataIndexesExtendedBucketHotCount) emit(metrics pmetric.Met } } -func newMetricSplunkDataIndexesExtendedBucketHotCount(cfg MetricConfig) metricSplunkDataIndexesExtendedBucketHotCount { - m := metricSplunkDataIndexesExtendedBucketHotCount{config: cfg} +func newMetricSplunkIndexesSize(cfg MetricConfig) metricSplunkIndexesSize { + m := metricSplunkIndexesSize{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -166,22 +984,22 @@ func newMetricSplunkDataIndexesExtendedBucketHotCount(cfg MetricConfig) metricSp return m } -type metricSplunkDataIndexesExtendedBucketWarmCount struct { +type metricSplunkIoAvgIops struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills splunk.data.indexes.extended.bucket.warm.count metric with initial data. -func (m *metricSplunkDataIndexesExtendedBucketWarmCount) init() { - m.data.SetName("splunk.data.indexes.extended.bucket.warm.count") - m.data.SetDescription("(If size > 0) Number of warm buckets") - m.data.SetUnit("{buckets}") +// init fills splunk.io.avg.iops metric with initial data. +func (m *metricSplunkIoAvgIops) init() { + m.data.SetName("splunk.io.avg.iops") + m.data.SetDescription("Gauge tracking the average IOPs used per instance") + m.data.SetUnit("{iops}") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSplunkDataIndexesExtendedBucketWarmCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string, splunkBucketDirAttributeValue string) { +func (m *metricSplunkIoAvgIops) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { if !m.config.Enabled { return } @@ -189,19 +1007,18 @@ func (m *metricSplunkDataIndexesExtendedBucketWarmCount) recordDataPoint(start p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) - dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue) - dp.Attributes().PutStr("splunk.bucket.dir", splunkBucketDirAttributeValue) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricSplunkDataIndexesExtendedBucketWarmCount) updateCapacity() { +func (m *metricSplunkIoAvgIops) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricSplunkDataIndexesExtendedBucketWarmCount) emit(metrics pmetric.MetricSlice) { +func (m *metricSplunkIoAvgIops) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -209,8 +1026,8 @@ func (m *metricSplunkDataIndexesExtendedBucketWarmCount) emit(metrics pmetric.Me } } -func newMetricSplunkDataIndexesExtendedBucketWarmCount(cfg MetricConfig) metricSplunkDataIndexesExtendedBucketWarmCount { - m := metricSplunkDataIndexesExtendedBucketWarmCount{config: cfg} +func newMetricSplunkIoAvgIops(cfg MetricConfig) metricSplunkIoAvgIops { + m := metricSplunkIoAvgIops{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -218,22 +1035,22 @@ func newMetricSplunkDataIndexesExtendedBucketWarmCount(cfg MetricConfig) metricS return m } -type metricSplunkDataIndexesExtendedEventCount struct { +type metricSplunkLicenseIndexUsage struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills splunk.data.indexes.extended.event.count metric with initial data. -func (m *metricSplunkDataIndexesExtendedEventCount) init() { - m.data.SetName("splunk.data.indexes.extended.event.count") - m.data.SetDescription("Count of events for index, excluding frozen events. Approximately equal to the event_count sum of all buckets.") - m.data.SetUnit("{events}") +// init fills splunk.license.index.usage metric with initial data. +func (m *metricSplunkLicenseIndexUsage) init() { + m.data.SetName("splunk.license.index.usage") + m.data.SetDescription("Gauge tracking the indexed license usage per index") + m.data.SetUnit("By") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSplunkDataIndexesExtendedEventCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) { +func (m *metricSplunkLicenseIndexUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) { if !m.config.Enabled { return } @@ -245,14 +1062,14 @@ func (m *metricSplunkDataIndexesExtendedEventCount) recordDataPoint(start pcommo } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricSplunkDataIndexesExtendedEventCount) updateCapacity() { +func (m *metricSplunkLicenseIndexUsage) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricSplunkDataIndexesExtendedEventCount) emit(metrics pmetric.MetricS +func (m *metricSplunkLicenseIndexUsage) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -260,8 +1077,8 @@ func (m *metricSplunkDataIndexesExtendedEventCount) emit(metrics pmetric.MetricS } } -func newMetricSplunkDataIndexesExtendedEventCount(cfg MetricConfig) metricSplunkDataIndexesExtendedEventCount { - m := metricSplunkDataIndexesExtendedEventCount{config: cfg} +func newMetricSplunkLicenseIndexUsage(cfg MetricConfig) metricSplunkLicenseIndexUsage { + m := metricSplunkLicenseIndexUsage{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -269,41 +1086,41 @@ func newMetricSplunkDataIndexesExtendedEventCount(cfg MetricConfig) metricSplunk return m } -type metricSplunkDataIndexesExtendedRawSize struct { +type metricSplunkParseQueueRatio struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills splunk.data.indexes.extended.raw.size metric with initial data. -func (m *metricSplunkDataIndexesExtendedRawSize) init() { - m.data.SetName("splunk.data.indexes.extended.raw.size") - m.data.SetDescription("Size in bytes on disk of the /rawdata/ directories of all buckets in this index, excluding frozen") - m.data.SetUnit("By") +// init fills splunk.parse.queue.ratio metric with initial data. +func (m *metricSplunkParseQueueRatio) init() { + m.data.SetName("splunk.parse.queue.ratio") + m.data.SetDescription("Gauge tracking the average indexer parser queue ratio (%). **Note:** Search is best run against a Cluster Manager.") + m.data.SetUnit("{%}") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSplunkDataIndexesExtendedRawSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) { +func (m *metricSplunkParseQueueRatio) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricSplunkDataIndexesExtendedRawSize) updateCapacity() { +func (m *metricSplunkParseQueueRatio) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSplunkDataIndexesExtendedRawSize) emit(metrics pmetric.MetricSlic +func (m *metricSplunkParseQueueRatio) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -311,8 +1128,8 @@ func (m *metricSplunkDataIndexesExtendedRawSize) emit(metrics pmetric.MetricSlic } } -func newMetricSplunkDataIndexesExtendedRawSize(cfg MetricConfig) metricSplunkDataIndexesExtendedRawSize { - m := metricSplunkDataIndexesExtendedRawSize{config: cfg} +func newMetricSplunkParseQueueRatio(cfg MetricConfig) metricSplunkParseQueueRatio { + m := metricSplunkParseQueueRatio{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -320,22 +1137,22 @@ func newMetricSplunkDataIndexesExtendedRawSize(cfg MetricConfig) metricSplunkDat return m } -type metricSplunkDataIndexesExtendedTotalSize struct { +type metricSplunkPipelineSetCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills splunk.data.indexes.extended.total.size metric with initial data. -func (m *metricSplunkDataIndexesExtendedTotalSize) init() { - m.data.SetName("splunk.data.indexes.extended.total.size") - m.data.SetDescription("Size in bytes on disk of this index") - m.data.SetUnit("By") +// init fills splunk.pipeline.set.count metric with initial data. +func (m *metricSplunkPipelineSetCount) init() { + m.data.SetName("splunk.pipeline.set.count") + m.data.SetDescription("Gauge tracking the number of pipeline sets per indexer. **Note:** Search is best run against a Cluster Manager.") + m.data.SetUnit("{count}") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSplunkDataIndexesExtendedTotalSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) { +func (m *metricSplunkPipelineSetCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { if !m.config.Enabled { return } @@ -343,18 +1160,18 @@ func (m *metricSplunkDataIndexesExtendedTotalSize) recordDataPoint(start pcommon dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntValue(val) - dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricSplunkDataIndexesExtendedTotalSize) updateCapacity() { +func (m *metricSplunkPipelineSetCount) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSplunkDataIndexesExtendedTotalSize) emit(metrics pmetric.MetricSlice) { +func (m *metricSplunkPipelineSetCount) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -362,8 +1179,8 @@ func (m *metricSplunkDataIndexesExtendedTotalSize) emit(metrics pmetric.MetricSl } } -func newMetricSplunkDataIndexesExtendedTotalSize(cfg MetricConfig) metricSplunkDataIndexesExtendedTotalSize { - m := metricSplunkDataIndexesExtendedTotalSize{config: cfg} +func newMetricSplunkPipelineSetCount(cfg MetricConfig) metricSplunkPipelineSetCount { + m := metricSplunkPipelineSetCount{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -371,22 +1188,22 @@ func newMetricSplunkDataIndexesExtendedTotalSize(cfg MetricConfig) metricSplunkD return m } -type metricSplunkIndexerThroughput struct { +type metricSplunkSchedulerAvgExecutionLatency struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills splunk.indexer.throughput metric with initial data. -func (m *metricSplunkIndexerThroughput) init() { - m.data.SetName("splunk.indexer.throughput") - m.data.SetDescription("Gauge tracking average bytes per second throughput of indexer") - m.data.SetUnit("By/s") +// init fills splunk.scheduler.avg.execution.latency metric with initial data. +func (m *metricSplunkSchedulerAvgExecutionLatency) init() { + m.data.SetName("splunk.scheduler.avg.execution.latency") + m.data.SetDescription("Gauge tracking the average execution latency of scheduled searches") + m.data.SetUnit("{ms}") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSplunkIndexerThroughput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkIndexerStatusAttributeValue string) { +func (m *metricSplunkSchedulerAvgExecutionLatency) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { if !m.config.Enabled { return } @@ -394,18 +1211,18 @@ func (m *metricSplunkIndexerThroughput) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleValue(val) - dp.Attributes().PutStr("splunk.indexer.status", splunkIndexerStatusAttributeValue) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricSplunkIndexerThroughput) updateCapacity() { +func (m *metricSplunkSchedulerAvgExecutionLatency) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricSplunkIndexerThroughput) emit(metrics pmetric.MetricSlice) { +func (m *metricSplunkSchedulerAvgExecutionLatency) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -413,8 +1230,8 @@ func (m *metricSplunkIndexerThroughput) emit(metrics pmetric.MetricSlice) { } } -func newMetricSplunkIndexerThroughput(cfg MetricConfig) metricSplunkIndexerThroughput { - m := metricSplunkIndexerThroughput{config: cfg} +func newMetricSplunkSchedulerAvgExecutionLatency(cfg MetricConfig) metricSplunkSchedulerAvgExecutionLatency { + m := metricSplunkSchedulerAvgExecutionLatency{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -422,41 +1239,41 @@ func newMetricSplunkIndexerThroughput(cfg MetricConfig) metricSplunkIndexerThrou return m } -type metricSplunkLicenseIndexUsage struct { +type metricSplunkSchedulerAvgRunTime struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. capacity int // max observed number of data points added to the metric. } -// init fills splunk.license.index.usage metric with initial data. -func (m *metricSplunkLicenseIndexUsage) init() { - m.data.SetName("splunk.license.index.usage") - m.data.SetDescription("Gauge tracking the indexed license usage per index") - m.data.SetUnit("By") +// init fills splunk.scheduler.avg.run.time metric with initial data. +func (m *metricSplunkSchedulerAvgRunTime) init() { + m.data.SetName("splunk.scheduler.avg.run.time") + m.data.SetDescription("Gauge tracking the average runtime of scheduled searches") + m.data.SetUnit("{ms}") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSplunkLicenseIndexUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) { +func (m *metricSplunkSchedulerAvgRunTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { if !m.config.Enabled { return } dp := m.data.Gauge().DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(ts) - dp.SetIntValue(val) - dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. -func (m *metricSplunkLicenseIndexUsage) updateCapacity() { +func (m *metricSplunkSchedulerAvgRunTime) updateCapacity() { if m.data.Gauge().DataPoints().Len() > m.capacity { m.capacity = m.data.Gauge().DataPoints().Len() } } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricSplunkLicenseIndexUsage) emit(metrics pmetric.MetricSlice) { +func (m *metricSplunkSchedulerAvgRunTime) emit(metrics pmetric.MetricSlice) { if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -464,8 +1281,59 @@ func (m *metricSplunkLicenseIndexUsage) emit(metrics pmetric.MetricSlice) { } } -func newMetricSplunkLicenseIndexUsage(cfg MetricConfig) metricSplunkLicenseIndexUsage { - m := metricSplunkLicenseIndexUsage{config: cfg} +func newMetricSplunkSchedulerAvgRunTime(cfg MetricConfig) metricSplunkSchedulerAvgRunTime { + m := metricSplunkSchedulerAvgRunTime{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSplunkSchedulerCompletionRatio struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.scheduler.completion.ratio metric with initial data. +func (m *metricSplunkSchedulerCompletionRatio) init() { + m.data.SetName("splunk.scheduler.completion.ratio") + m.data.SetDescription("Gauge tracking the ratio of completed to skipped scheduled searches") + m.data.SetUnit("{%}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkSchedulerCompletionRatio) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkSchedulerCompletionRatio) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSplunkSchedulerCompletionRatio) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkSchedulerCompletionRatio(cfg MetricConfig) metricSplunkSchedulerCompletionRatio { + m := metricSplunkSchedulerCompletionRatio{config: cfg} if cfg.Enabled { m.data = pmetric.NewMetric() m.init() @@ -482,7 +1350,7 @@ type metricSplunkServerIntrospectionQueuesCurrent struct { // init fills splunk.server.introspection.queues.current metric with initial data. func (m *metricSplunkServerIntrospectionQueuesCurrent) init() { m.data.SetName("splunk.server.introspection.queues.current") - m.data.SetDescription("Gauge tracking current length of queue") + m.data.SetDescription("Gauge tracking current length of queue. **Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.") m.data.SetUnit("{queues}") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) @@ -533,7 +1401,7 @@ type metricSplunkServerIntrospectionQueuesCurrentBytes struct { // init fills splunk.server.introspection.queues.current.bytes metric with initial data.
func (m *metricSplunkServerIntrospectionQueuesCurrentBytes) init() { m.data.SetName("splunk.server.introspection.queues.current.bytes") - m.data.SetDescription("Gauge tracking current bytes waiting in queue") + m.data.SetDescription("Gauge tracking current bytes waiting in queue. **Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.") m.data.SetUnit("By") m.data.SetEmptyGauge() m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) @@ -575,6 +1443,57 @@ func newMetricSplunkServerIntrospectionQueuesCurrentBytes(cfg MetricConfig) metr return m } +type metricSplunkTypingQueueRatio struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.typing.queue.ratio metric with initial data. +func (m *metricSplunkTypingQueueRatio) init() { + m.data.SetName("splunk.typing.queue.ratio") + m.data.SetDescription("Gauge tracking the average indexer typing queue ratio (%). **Note:** Search is best run against a Cluster Manager.") + m.data.SetUnit("{%}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkTypingQueueRatio) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("splunk.host", splunkHostAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkTypingQueueRatio) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSplunkTypingQueueRatio) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkTypingQueueRatio(cfg MetricConfig) metricSplunkTypingQueueRatio { + m := metricSplunkTypingQueueRatio{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user config. type MetricsBuilder struct { config MetricsBuilderConfig // config of the metrics builder. startTime pcommon.Timestamp // start time that will be applied to all recorded data points. metricsCapacity int // maximum observed number of metrics per resource. metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. buildInfo component.BuildInfo // contains version information.
+ metricSplunkAggregationQueueRatio metricSplunkAggregationQueueRatio + metricSplunkBucketsSearchableStatus metricSplunkBucketsSearchableStatus metricSplunkDataIndexesExtendedBucketCount metricSplunkDataIndexesExtendedBucketCount metricSplunkDataIndexesExtendedBucketEventCount metricSplunkDataIndexesExtendedBucketEventCount metricSplunkDataIndexesExtendedBucketHotCount metricSplunkDataIndexesExtendedBucketHotCount @@ -590,10 +1511,26 @@ type MetricsBuilder struct { metricSplunkDataIndexesExtendedEventCount metricSplunkDataIndexesExtendedEventCount metricSplunkDataIndexesExtendedRawSize metricSplunkDataIndexesExtendedRawSize metricSplunkDataIndexesExtendedTotalSize metricSplunkDataIndexesExtendedTotalSize + metricSplunkIndexerAvgRate metricSplunkIndexerAvgRate + metricSplunkIndexerCPUTime metricSplunkIndexerCPUTime + metricSplunkIndexerQueueRatio metricSplunkIndexerQueueRatio + metricSplunkIndexerRawWriteTime metricSplunkIndexerRawWriteTime metricSplunkIndexerThroughput metricSplunkIndexerThroughput + metricSplunkIndexesAvgSize metricSplunkIndexesAvgSize + metricSplunkIndexesAvgUsage metricSplunkIndexesAvgUsage + metricSplunkIndexesBucketCount metricSplunkIndexesBucketCount + metricSplunkIndexesMedianDataAge metricSplunkIndexesMedianDataAge + metricSplunkIndexesSize metricSplunkIndexesSize + metricSplunkIoAvgIops metricSplunkIoAvgIops metricSplunkLicenseIndexUsage metricSplunkLicenseIndexUsage + metricSplunkParseQueueRatio metricSplunkParseQueueRatio + metricSplunkPipelineSetCount metricSplunkPipelineSetCount + metricSplunkSchedulerAvgExecutionLatency metricSplunkSchedulerAvgExecutionLatency + metricSplunkSchedulerAvgRunTime metricSplunkSchedulerAvgRunTime + metricSplunkSchedulerCompletionRatio metricSplunkSchedulerCompletionRatio metricSplunkServerIntrospectionQueuesCurrent metricSplunkServerIntrospectionQueuesCurrent metricSplunkServerIntrospectionQueuesCurrentBytes metricSplunkServerIntrospectionQueuesCurrentBytes + metricSplunkTypingQueueRatio metricSplunkTypingQueueRatio } // metricBuilderOption applies changes to default metrics builder. 
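For orientation, a scraper consumes this builder in three steps: construct it from config, record data points during each scrape, then emit a `pmetric.Metrics` payload. A minimal sketch follows, illustrative only and not part of the generated patch; it assumes the `DefaultMetricsBuilderConfig` helper from the generated config file, and `settings`, the host name, and the recorded values are placeholders.

```go
package metadata

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/receiver"
)

// exampleScrape sketches the intended MetricsBuilder lifecycle.
func exampleScrape(settings receiver.CreateSettings) pmetric.Metrics {
	mbc := DefaultMetricsBuilderConfig()
	// Metrics disabled by default (per-instance API searches) can be re-enabled.
	mbc.Metrics.SplunkIndexerThroughput.Enabled = true

	mb := NewMetricsBuilder(mbc, settings,
		WithStartTime(pcommon.NewTimestampFromTime(time.Now())))

	// During a scrape, record one data point per search result row.
	now := pcommon.NewTimestampFromTime(time.Now())
	mb.RecordSplunkIndexerAvgRateDataPoint(now, 105.7, "idx-01.example.com")
	mb.RecordSplunkParseQueueRatioDataPoint(now, 0.42, "idx-01.example.com")

	// Emit moves everything recorded so far into a fresh payload and
	// resets the builder for the next scrape.
	return mb.Emit()
}
```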
@@ -608,10 +1545,12 @@ func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - config: mbc, - startTime: pcommon.NewTimestampFromTime(time.Now()), - metricsBuffer: pmetric.NewMetrics(), - buildInfo: settings.BuildInfo, + config: mbc, + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: settings.BuildInfo, + metricSplunkAggregationQueueRatio: newMetricSplunkAggregationQueueRatio(mbc.Metrics.SplunkAggregationQueueRatio), + metricSplunkBucketsSearchableStatus: newMetricSplunkBucketsSearchableStatus(mbc.Metrics.SplunkBucketsSearchableStatus), metricSplunkDataIndexesExtendedBucketCount: newMetricSplunkDataIndexesExtendedBucketCount(mbc.Metrics.SplunkDataIndexesExtendedBucketCount), metricSplunkDataIndexesExtendedBucketEventCount: newMetricSplunkDataIndexesExtendedBucketEventCount(mbc.Metrics.SplunkDataIndexesExtendedBucketEventCount), metricSplunkDataIndexesExtendedBucketHotCount: newMetricSplunkDataIndexesExtendedBucketHotCount(mbc.Metrics.SplunkDataIndexesExtendedBucketHotCount), @@ -619,10 +1558,26 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricSplunkDataIndexesExtendedEventCount: newMetricSplunkDataIndexesExtendedEventCount(mbc.Metrics.SplunkDataIndexesExtendedEventCount), metricSplunkDataIndexesExtendedRawSize: newMetricSplunkDataIndexesExtendedRawSize(mbc.Metrics.SplunkDataIndexesExtendedRawSize), metricSplunkDataIndexesExtendedTotalSize: newMetricSplunkDataIndexesExtendedTotalSize(mbc.Metrics.SplunkDataIndexesExtendedTotalSize), + metricSplunkIndexerAvgRate: newMetricSplunkIndexerAvgRate(mbc.Metrics.SplunkIndexerAvgRate), + metricSplunkIndexerCPUTime: newMetricSplunkIndexerCPUTime(mbc.Metrics.SplunkIndexerCPUTime), + metricSplunkIndexerQueueRatio: newMetricSplunkIndexerQueueRatio(mbc.Metrics.SplunkIndexerQueueRatio), + metricSplunkIndexerRawWriteTime: newMetricSplunkIndexerRawWriteTime(mbc.Metrics.SplunkIndexerRawWriteTime), metricSplunkIndexerThroughput: newMetricSplunkIndexerThroughput(mbc.Metrics.SplunkIndexerThroughput), + metricSplunkIndexesAvgSize: newMetricSplunkIndexesAvgSize(mbc.Metrics.SplunkIndexesAvgSize), + metricSplunkIndexesAvgUsage: newMetricSplunkIndexesAvgUsage(mbc.Metrics.SplunkIndexesAvgUsage), + metricSplunkIndexesBucketCount: newMetricSplunkIndexesBucketCount(mbc.Metrics.SplunkIndexesBucketCount), + metricSplunkIndexesMedianDataAge: newMetricSplunkIndexesMedianDataAge(mbc.Metrics.SplunkIndexesMedianDataAge), + metricSplunkIndexesSize: newMetricSplunkIndexesSize(mbc.Metrics.SplunkIndexesSize), + metricSplunkIoAvgIops: newMetricSplunkIoAvgIops(mbc.Metrics.SplunkIoAvgIops), metricSplunkLicenseIndexUsage: newMetricSplunkLicenseIndexUsage(mbc.Metrics.SplunkLicenseIndexUsage), + metricSplunkParseQueueRatio: newMetricSplunkParseQueueRatio(mbc.Metrics.SplunkParseQueueRatio), + metricSplunkPipelineSetCount: newMetricSplunkPipelineSetCount(mbc.Metrics.SplunkPipelineSetCount), + metricSplunkSchedulerAvgExecutionLatency: newMetricSplunkSchedulerAvgExecutionLatency(mbc.Metrics.SplunkSchedulerAvgExecutionLatency), + metricSplunkSchedulerAvgRunTime: newMetricSplunkSchedulerAvgRunTime(mbc.Metrics.SplunkSchedulerAvgRunTime), + metricSplunkSchedulerCompletionRatio: newMetricSplunkSchedulerCompletionRatio(mbc.Metrics.SplunkSchedulerCompletionRatio), metricSplunkServerIntrospectionQueuesCurrent: 
newMetricSplunkServerIntrospectionQueuesCurrent(mbc.Metrics.SplunkServerIntrospectionQueuesCurrent), metricSplunkServerIntrospectionQueuesCurrentBytes: newMetricSplunkServerIntrospectionQueuesCurrentBytes(mbc.Metrics.SplunkServerIntrospectionQueuesCurrentBytes), + metricSplunkTypingQueueRatio: newMetricSplunkTypingQueueRatio(mbc.Metrics.SplunkTypingQueueRatio), } for _, op := range options { op(mb) @@ -679,6 +1634,8 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { ils.Scope().SetName("otelcol/splunkenterprisereceiver") ils.Scope().SetVersion(mb.buildInfo.Version) ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricSplunkAggregationQueueRatio.emit(ils.Metrics()) + mb.metricSplunkBucketsSearchableStatus.emit(ils.Metrics()) mb.metricSplunkDataIndexesExtendedBucketCount.emit(ils.Metrics()) mb.metricSplunkDataIndexesExtendedBucketEventCount.emit(ils.Metrics()) mb.metricSplunkDataIndexesExtendedBucketHotCount.emit(ils.Metrics()) @@ -686,10 +1643,26 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricSplunkDataIndexesExtendedEventCount.emit(ils.Metrics()) mb.metricSplunkDataIndexesExtendedRawSize.emit(ils.Metrics()) mb.metricSplunkDataIndexesExtendedTotalSize.emit(ils.Metrics()) + mb.metricSplunkIndexerAvgRate.emit(ils.Metrics()) + mb.metricSplunkIndexerCPUTime.emit(ils.Metrics()) + mb.metricSplunkIndexerQueueRatio.emit(ils.Metrics()) + mb.metricSplunkIndexerRawWriteTime.emit(ils.Metrics()) mb.metricSplunkIndexerThroughput.emit(ils.Metrics()) + mb.metricSplunkIndexesAvgSize.emit(ils.Metrics()) + mb.metricSplunkIndexesAvgUsage.emit(ils.Metrics()) + mb.metricSplunkIndexesBucketCount.emit(ils.Metrics()) + mb.metricSplunkIndexesMedianDataAge.emit(ils.Metrics()) + mb.metricSplunkIndexesSize.emit(ils.Metrics()) + mb.metricSplunkIoAvgIops.emit(ils.Metrics()) mb.metricSplunkLicenseIndexUsage.emit(ils.Metrics()) + mb.metricSplunkParseQueueRatio.emit(ils.Metrics()) + mb.metricSplunkPipelineSetCount.emit(ils.Metrics()) + mb.metricSplunkSchedulerAvgExecutionLatency.emit(ils.Metrics()) + mb.metricSplunkSchedulerAvgRunTime.emit(ils.Metrics()) + mb.metricSplunkSchedulerCompletionRatio.emit(ils.Metrics()) mb.metricSplunkServerIntrospectionQueuesCurrent.emit(ils.Metrics()) mb.metricSplunkServerIntrospectionQueuesCurrentBytes.emit(ils.Metrics()) + mb.metricSplunkTypingQueueRatio.emit(ils.Metrics()) for _, op := range rmo { op(rm) @@ -710,6 +1683,16 @@ func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { return metrics } +// RecordSplunkAggregationQueueRatioDataPoint adds a data point to splunk.aggregation.queue.ratio metric. +func (mb *MetricsBuilder) RecordSplunkAggregationQueueRatioDataPoint(ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + mb.metricSplunkAggregationQueueRatio.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + +// RecordSplunkBucketsSearchableStatusDataPoint adds a data point to splunk.buckets.searchable.status metric. +func (mb *MetricsBuilder) RecordSplunkBucketsSearchableStatusDataPoint(ts pcommon.Timestamp, val int64, splunkHostAttributeValue string, splunkIndexerSearchableAttributeValue string) { + mb.metricSplunkBucketsSearchableStatus.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue, splunkIndexerSearchableAttributeValue) +} + // RecordSplunkDataIndexesExtendedBucketCountDataPoint adds a data point to splunk.data.indexes.extended.bucket.count metric. 
func (mb *MetricsBuilder) RecordSplunkDataIndexesExtendedBucketCountDataPoint(ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) { mb.metricSplunkDataIndexesExtendedBucketCount.recordDataPoint(mb.startTime, ts, val, splunkIndexNameAttributeValue) @@ -745,16 +1728,91 @@ func (mb *MetricsBuilder) RecordSplunkDataIndexesExtendedTotalSizeDataPoint(ts p mb.metricSplunkDataIndexesExtendedTotalSize.recordDataPoint(mb.startTime, ts, val, splunkIndexNameAttributeValue) } +// RecordSplunkIndexerAvgRateDataPoint adds a data point to splunk.indexer.avg.rate metric. +func (mb *MetricsBuilder) RecordSplunkIndexerAvgRateDataPoint(ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + mb.metricSplunkIndexerAvgRate.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + +// RecordSplunkIndexerCPUTimeDataPoint adds a data point to splunk.indexer.cpu.time metric. +func (mb *MetricsBuilder) RecordSplunkIndexerCPUTimeDataPoint(ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + mb.metricSplunkIndexerCPUTime.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + +// RecordSplunkIndexerQueueRatioDataPoint adds a data point to splunk.indexer.queue.ratio metric. +func (mb *MetricsBuilder) RecordSplunkIndexerQueueRatioDataPoint(ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + mb.metricSplunkIndexerQueueRatio.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + +// RecordSplunkIndexerRawWriteTimeDataPoint adds a data point to splunk.indexer.raw.write.time metric. +func (mb *MetricsBuilder) RecordSplunkIndexerRawWriteTimeDataPoint(ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + mb.metricSplunkIndexerRawWriteTime.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + // RecordSplunkIndexerThroughputDataPoint adds a data point to splunk.indexer.throughput metric. func (mb *MetricsBuilder) RecordSplunkIndexerThroughputDataPoint(ts pcommon.Timestamp, val float64, splunkIndexerStatusAttributeValue string) { mb.metricSplunkIndexerThroughput.recordDataPoint(mb.startTime, ts, val, splunkIndexerStatusAttributeValue) } +// RecordSplunkIndexesAvgSizeDataPoint adds a data point to splunk.indexes.avg.size metric. +func (mb *MetricsBuilder) RecordSplunkIndexesAvgSizeDataPoint(ts pcommon.Timestamp, val float64, splunkIndexNameAttributeValue string) { + mb.metricSplunkIndexesAvgSize.recordDataPoint(mb.startTime, ts, val, splunkIndexNameAttributeValue) +} + +// RecordSplunkIndexesAvgUsageDataPoint adds a data point to splunk.indexes.avg.usage metric. +func (mb *MetricsBuilder) RecordSplunkIndexesAvgUsageDataPoint(ts pcommon.Timestamp, val float64, splunkIndexNameAttributeValue string) { + mb.metricSplunkIndexesAvgUsage.recordDataPoint(mb.startTime, ts, val, splunkIndexNameAttributeValue) +} + +// RecordSplunkIndexesBucketCountDataPoint adds a data point to splunk.indexes.bucket.count metric. +func (mb *MetricsBuilder) RecordSplunkIndexesBucketCountDataPoint(ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) { + mb.metricSplunkIndexesBucketCount.recordDataPoint(mb.startTime, ts, val, splunkIndexNameAttributeValue) +} + +// RecordSplunkIndexesMedianDataAgeDataPoint adds a data point to splunk.indexes.median.data.age metric. 
+func (mb *MetricsBuilder) RecordSplunkIndexesMedianDataAgeDataPoint(ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) { + mb.metricSplunkIndexesMedianDataAge.recordDataPoint(mb.startTime, ts, val, splunkIndexNameAttributeValue) +} + +// RecordSplunkIndexesSizeDataPoint adds a data point to splunk.indexes.size metric. +func (mb *MetricsBuilder) RecordSplunkIndexesSizeDataPoint(ts pcommon.Timestamp, val float64, splunkIndexNameAttributeValue string) { + mb.metricSplunkIndexesSize.recordDataPoint(mb.startTime, ts, val, splunkIndexNameAttributeValue) +} + +// RecordSplunkIoAvgIopsDataPoint adds a data point to splunk.io.avg.iops metric. +func (mb *MetricsBuilder) RecordSplunkIoAvgIopsDataPoint(ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { + mb.metricSplunkIoAvgIops.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + // RecordSplunkLicenseIndexUsageDataPoint adds a data point to splunk.license.index.usage metric. func (mb *MetricsBuilder) RecordSplunkLicenseIndexUsageDataPoint(ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) { mb.metricSplunkLicenseIndexUsage.recordDataPoint(mb.startTime, ts, val, splunkIndexNameAttributeValue) } +// RecordSplunkParseQueueRatioDataPoint adds a data point to splunk.parse.queue.ratio metric. +func (mb *MetricsBuilder) RecordSplunkParseQueueRatioDataPoint(ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + mb.metricSplunkParseQueueRatio.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + +// RecordSplunkPipelineSetCountDataPoint adds a data point to splunk.pipeline.set.count metric. +func (mb *MetricsBuilder) RecordSplunkPipelineSetCountDataPoint(ts pcommon.Timestamp, val int64, splunkHostAttributeValue string) { + mb.metricSplunkPipelineSetCount.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + +// RecordSplunkSchedulerAvgExecutionLatencyDataPoint adds a data point to splunk.scheduler.avg.execution.latency metric. +func (mb *MetricsBuilder) RecordSplunkSchedulerAvgExecutionLatencyDataPoint(ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + mb.metricSplunkSchedulerAvgExecutionLatency.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + +// RecordSplunkSchedulerAvgRunTimeDataPoint adds a data point to splunk.scheduler.avg.run.time metric. +func (mb *MetricsBuilder) RecordSplunkSchedulerAvgRunTimeDataPoint(ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + mb.metricSplunkSchedulerAvgRunTime.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + +// RecordSplunkSchedulerCompletionRatioDataPoint adds a data point to splunk.scheduler.completion.ratio metric. +func (mb *MetricsBuilder) RecordSplunkSchedulerCompletionRatioDataPoint(ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + mb.metricSplunkSchedulerCompletionRatio.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + // RecordSplunkServerIntrospectionQueuesCurrentDataPoint adds a data point to splunk.server.introspection.queues.current metric. 
func (mb *MetricsBuilder) RecordSplunkServerIntrospectionQueuesCurrentDataPoint(ts pcommon.Timestamp, val int64, splunkQueueNameAttributeValue string) { mb.metricSplunkServerIntrospectionQueuesCurrent.recordDataPoint(mb.startTime, ts, val, splunkQueueNameAttributeValue) @@ -765,6 +1823,11 @@ func (mb *MetricsBuilder) RecordSplunkServerIntrospectionQueuesCurrentBytesDataP mb.metricSplunkServerIntrospectionQueuesCurrentBytes.recordDataPoint(mb.startTime, ts, val, splunkQueueNameAttributeValue) } +// RecordSplunkTypingQueueRatioDataPoint adds a data point to splunk.typing.queue.ratio metric. +func (mb *MetricsBuilder) RecordSplunkTypingQueueRatioDataPoint(ts pcommon.Timestamp, val float64, splunkHostAttributeValue string) { + mb.metricSplunkTypingQueueRatio.recordDataPoint(mb.startTime, ts, val, splunkHostAttributeValue) +} + // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics_test.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics_test.go index b427937ceced..1ac5c9950090 100644 --- a/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics_test.go @@ -56,6 +56,13 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount := 0 defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkAggregationQueueRatioDataPoint(ts, 1, "splunk.host-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkBucketsSearchableStatusDataPoint(ts, 1, "splunk.host-val", "splunk.indexer.searchable-val") + allMetricsCount++ mb.RecordSplunkDataIndexesExtendedBucketCountDataPoint(ts, 1, "splunk.index.name-val") @@ -68,34 +75,92 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordSplunkDataIndexesExtendedBucketWarmCountDataPoint(ts, 1, "splunk.index.name-val", "splunk.bucket.dir-val") - defaultMetricsCount++ allMetricsCount++ mb.RecordSplunkDataIndexesExtendedEventCountDataPoint(ts, 1, "splunk.index.name-val") - defaultMetricsCount++ allMetricsCount++ mb.RecordSplunkDataIndexesExtendedRawSizeDataPoint(ts, 1, "splunk.index.name-val") - defaultMetricsCount++ allMetricsCount++ mb.RecordSplunkDataIndexesExtendedTotalSizeDataPoint(ts, 1, "splunk.index.name-val") defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkIndexerAvgRateDataPoint(ts, 1, "splunk.host-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkIndexerCPUTimeDataPoint(ts, 1, "splunk.host-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkIndexerQueueRatioDataPoint(ts, 1, "splunk.host-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkIndexerRawWriteTimeDataPoint(ts, 1, "splunk.host-val") + allMetricsCount++ mb.RecordSplunkIndexerThroughputDataPoint(ts, 1, "splunk.indexer.status-val") + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkIndexesAvgSizeDataPoint(ts, 1, "splunk.index.name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkIndexesAvgUsageDataPoint(ts, 1, "splunk.index.name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkIndexesBucketCountDataPoint(ts, 1, "splunk.index.name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkIndexesMedianDataAgeDataPoint(ts, 1, 
"splunk.index.name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkIndexesSizeDataPoint(ts, 1, "splunk.index.name-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkIoAvgIopsDataPoint(ts, 1, "splunk.host-val") + defaultMetricsCount++ allMetricsCount++ mb.RecordSplunkLicenseIndexUsageDataPoint(ts, 1, "splunk.index.name-val") defaultMetricsCount++ allMetricsCount++ - mb.RecordSplunkServerIntrospectionQueuesCurrentDataPoint(ts, 1, "splunk.queue.name-val") + mb.RecordSplunkParseQueueRatioDataPoint(ts, 1, "splunk.host-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkPipelineSetCountDataPoint(ts, 1, "splunk.host-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkSchedulerAvgExecutionLatencyDataPoint(ts, 1, "splunk.host-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkSchedulerAvgRunTimeDataPoint(ts, 1, "splunk.host-val") defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkSchedulerCompletionRatioDataPoint(ts, 1, "splunk.host-val") + + allMetricsCount++ + mb.RecordSplunkServerIntrospectionQueuesCurrentDataPoint(ts, 1, "splunk.queue.name-val") + allMetricsCount++ mb.RecordSplunkServerIntrospectionQueuesCurrentBytesDataPoint(ts, 1, "splunk.queue.name-val") + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkTypingQueueRatioDataPoint(ts, 1, "splunk.host-val") + res := pcommon.NewResource() metrics := mb.Emit(WithResource(res)) @@ -118,6 +183,39 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics := make(map[string]bool) for i := 0; i < ms.Len(); i++ { switch ms.At(i).Name() { + case "splunk.aggregation.queue.ratio": + assert.False(t, validatedMetrics["splunk.aggregation.queue.ratio"], "Found a duplicate in the metrics slice: splunk.aggregation.queue.ratio") + validatedMetrics["splunk.aggregation.queue.ratio"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the average indexer aggregation queue ration (%). *Note:** Search is best run against a Cluster Manager.", ms.At(i).Description()) + assert.Equal(t, "{%}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) + case "splunk.buckets.searchable.status": + assert.False(t, validatedMetrics["splunk.buckets.searchable.status"], "Found a duplicate in the metrics slice: splunk.buckets.searchable.status") + validatedMetrics["splunk.buckets.searchable.status"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the number of buckets and their searchable status. 
**Note:** Search is best run against a Cluster Manager.", ms.At(i).Description()) + assert.Equal(t, "{count}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("splunk.indexer.searchable") + assert.True(t, ok) + assert.EqualValues(t, "splunk.indexer.searchable-val", attrVal.Str()) case "splunk.data.indexes.extended.bucket.count": assert.False(t, validatedMetrics["splunk.data.indexes.extended.bucket.count"], "Found a duplicate in the metrics slice: splunk.data.indexes.extended.bucket.count") validatedMetrics["splunk.data.indexes.extended.bucket.count"] = true @@ -138,7 +236,7 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["splunk.data.indexes.extended.bucket.event.count"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Count of events in this bucket super-directory", ms.At(i).Description()) + assert.Equal(t, "Count of events in this bucket super-directory. **Note:** Must be pointed at specific indexer `endpoint`.", ms.At(i).Description()) assert.Equal(t, "{events}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -156,7 +254,7 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["splunk.data.indexes.extended.bucket.hot.count"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "(If size > 0) Number of hot buckets", ms.At(i).Description()) + assert.Equal(t, "(If size > 0) Number of hot buckets. **Note:** Must be pointed at specific indexer `endpoint`.", ms.At(i).Description()) assert.Equal(t, "{buckets}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -174,7 +272,7 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["splunk.data.indexes.extended.bucket.warm.count"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "(If size > 0) Number of warm buckets", ms.At(i).Description()) + assert.Equal(t, "(If size > 0) Number of warm buckets. **Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.", ms.At(i).Description()) assert.Equal(t, "{buckets}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -192,7 +290,7 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["splunk.data.indexes.extended.event.count"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Count of events for index, excluding frozen events. Approximately equal to the event_count sum of all buckets.", ms.At(i).Description()) + assert.Equal(t, "Count of events for index, excluding frozen events. Approximately equal to the event_count sum of all buckets. 
**Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.", ms.At(i).Description()) assert.Equal(t, "{events}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -207,7 +305,7 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["splunk.data.indexes.extended.raw.size"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Size in bytes on disk of the /rawdata/ directories of all buckets in this index, excluding frozen", ms.At(i).Description()) + assert.Equal(t, "Size in bytes on disk of the /rawdata/ directories of all buckets in this index, excluding frozen. **Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.", ms.At(i).Description()) assert.Equal(t, "By", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -222,7 +320,7 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["splunk.data.indexes.extended.total.size"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Size in bytes on disk of this index", ms.At(i).Description()) + assert.Equal(t, "Size in bytes on disk of this index. **Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.", ms.At(i).Description()) assert.Equal(t, "By", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -232,12 +330,72 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("splunk.index.name") assert.True(t, ok) assert.EqualValues(t, "splunk.index.name-val", attrVal.Str()) + case "splunk.indexer.avg.rate": + assert.False(t, validatedMetrics["splunk.indexer.avg.rate"], "Found a duplicate in the metrics slice: splunk.indexer.avg.rate") + validatedMetrics["splunk.indexer.avg.rate"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the average rate of indexed data. 
**Note:** Search is best run against a Cluster Manager.", ms.At(i).Description()) + assert.Equal(t, "KBy", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) + case "splunk.indexer.cpu.time": + assert.False(t, validatedMetrics["splunk.indexer.cpu.time"], "Found a duplicate in the metrics slice: splunk.indexer.cpu.time") + validatedMetrics["splunk.indexer.cpu.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the number of indexing process cpu seconds per instance", ms.At(i).Description()) + assert.Equal(t, "{s}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) + case "splunk.indexer.queue.ratio": + assert.False(t, validatedMetrics["splunk.indexer.queue.ratio"], "Found a duplicate in the metrics slice: splunk.indexer.queue.ratio") + validatedMetrics["splunk.indexer.queue.ratio"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the average indexer index queue ration (%). 
*Note:** Search is best run against a Cluster Manager.", ms.At(i).Description()) + assert.Equal(t, "{%}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) + case "splunk.indexer.raw.write.time": + assert.False(t, validatedMetrics["splunk.indexer.raw.write.time"], "Found a duplicate in the metrics slice: splunk.indexer.raw.write.time") + validatedMetrics["splunk.indexer.raw.write.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the number of raw write seconds per instance", ms.At(i).Description()) + assert.Equal(t, "{s}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) case "splunk.indexer.throughput": assert.False(t, validatedMetrics["splunk.indexer.throughput"], "Found a duplicate in the metrics slice: splunk.indexer.throughput") validatedMetrics["splunk.indexer.throughput"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Gauge tracking average bytes per second throughput of indexer", ms.At(i).Description()) + assert.Equal(t, "Gauge tracking average bytes per second throughput of indexer. *Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.", ms.At(i).Description()) assert.Equal(t, "By/s", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -247,6 +405,96 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("splunk.indexer.status") assert.True(t, ok) assert.EqualValues(t, "splunk.indexer.status-val", attrVal.Str()) + case "splunk.indexes.avg.size": + assert.False(t, validatedMetrics["splunk.indexes.avg.size"], "Found a duplicate in the metrics slice: splunk.indexes.avg.size") + validatedMetrics["splunk.indexes.avg.size"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the indexes and their average size (gb). 
*Note:** Search is best run against a Cluster Manager.", ms.At(i).Description()) + assert.Equal(t, "Gb", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("splunk.index.name") + assert.True(t, ok) + assert.EqualValues(t, "splunk.index.name-val", attrVal.Str()) + case "splunk.indexes.avg.usage": + assert.False(t, validatedMetrics["splunk.indexes.avg.usage"], "Found a duplicate in the metrics slice: splunk.indexes.avg.usage") + validatedMetrics["splunk.indexes.avg.usage"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the indexes and their average usage (%). *Note:** Search is best run against a Cluster Manager.", ms.At(i).Description()) + assert.Equal(t, "{%}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("splunk.index.name") + assert.True(t, ok) + assert.EqualValues(t, "splunk.index.name-val", attrVal.Str()) + case "splunk.indexes.bucket.count": + assert.False(t, validatedMetrics["splunk.indexes.bucket.count"], "Found a duplicate in the metrics slice: splunk.indexes.bucket.count") + validatedMetrics["splunk.indexes.bucket.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the indexes and their bucket counts. *Note:** Search is best run against a Cluster Manager.", ms.At(i).Description()) + assert.Equal(t, "{count}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("splunk.index.name") + assert.True(t, ok) + assert.EqualValues(t, "splunk.index.name-val", attrVal.Str()) + case "splunk.indexes.median.data.age": + assert.False(t, validatedMetrics["splunk.indexes.median.data.age"], "Found a duplicate in the metrics slice: splunk.indexes.median.data.age") + validatedMetrics["splunk.indexes.median.data.age"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the indexes and their median data age (days). 
*Note:** Search is best run against a Cluster Manager.", ms.At(i).Description()) + assert.Equal(t, "{days}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("splunk.index.name") + assert.True(t, ok) + assert.EqualValues(t, "splunk.index.name-val", attrVal.Str()) + case "splunk.indexes.size": + assert.False(t, validatedMetrics["splunk.indexes.size"], "Found a duplicate in the metrics slice: splunk.indexes.size") + validatedMetrics["splunk.indexes.size"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the indexes and their total size (gb). *Note:** Search is best run against a Cluster Manager.", ms.At(i).Description()) + assert.Equal(t, "Gb", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("splunk.index.name") + assert.True(t, ok) + assert.EqualValues(t, "splunk.index.name-val", attrVal.Str()) + case "splunk.io.avg.iops": + assert.False(t, validatedMetrics["splunk.io.avg.iops"], "Found a duplicate in the metrics slice: splunk.io.avg.iops") + validatedMetrics["splunk.io.avg.iops"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the average IOPs used per instance", ms.At(i).Description()) + assert.Equal(t, "{iops}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) case "splunk.license.index.usage": assert.False(t, validatedMetrics["splunk.license.index.usage"], "Found a duplicate in the metrics slice: splunk.license.index.usage") validatedMetrics["splunk.license.index.usage"] = true @@ -262,12 +510,87 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("splunk.index.name") assert.True(t, ok) assert.EqualValues(t, "splunk.index.name-val", attrVal.Str()) + case "splunk.parse.queue.ratio": + assert.False(t, validatedMetrics["splunk.parse.queue.ratio"], "Found a duplicate in the metrics slice: splunk.parse.queue.ratio") + validatedMetrics["splunk.parse.queue.ratio"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the average indexer parser queue ration (%). 
*Note:** Search is best run against a Cluster Manager.", ms.At(i).Description()) + assert.Equal(t, "{%}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) + case "splunk.pipeline.set.count": + assert.False(t, validatedMetrics["splunk.pipeline.set.count"], "Found a duplicate in the metrics slice: splunk.pipeline.set.count") + validatedMetrics["splunk.pipeline.set.count"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the number of pipeline sets per indexer. **Note:** Search is best run against a Cluster Manager.", ms.At(i).Description()) + assert.Equal(t, "KBy", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) + case "splunk.scheduler.avg.execution.latency": + assert.False(t, validatedMetrics["splunk.scheduler.avg.execution.latency"], "Found a duplicate in the metrics slice: splunk.scheduler.avg.execution.latency") + validatedMetrics["splunk.scheduler.avg.execution.latency"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the average execution latency of scheduled searches", ms.At(i).Description()) + assert.Equal(t, "{ms}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) + case "splunk.scheduler.avg.run.time": + assert.False(t, validatedMetrics["splunk.scheduler.avg.run.time"], "Found a duplicate in the metrics slice: splunk.scheduler.avg.run.time") + validatedMetrics["splunk.scheduler.avg.run.time"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the average runtime of scheduled searches", ms.At(i).Description()) + assert.Equal(t, "{ms}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) + case "splunk.scheduler.completion.ratio": + assert.False(t, validatedMetrics["splunk.scheduler.completion.ratio"], "Found a duplicate in the metrics slice: splunk.scheduler.completion.ratio") + validatedMetrics["splunk.scheduler.completion.ratio"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + 
assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the ratio of completed to skipped scheduled searches", ms.At(i).Description()) + assert.Equal(t, "{%}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) case "splunk.server.introspection.queues.current": assert.False(t, validatedMetrics["splunk.server.introspection.queues.current"], "Found a duplicate in the metrics slice: splunk.server.introspection.queues.current") validatedMetrics["splunk.server.introspection.queues.current"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Gauge tracking current length of queue", ms.At(i).Description()) + assert.Equal(t, "Gauge tracking current length of queue. *Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.", ms.At(i).Description()) assert.Equal(t, "{queues}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -282,7 +605,7 @@ func TestMetricsBuilder(t *testing.T) { validatedMetrics["splunk.server.introspection.queues.current.bytes"] = true assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) - assert.Equal(t, "Gauge tracking current bytes waiting in queue", ms.At(i).Description()) + assert.Equal(t, "Gauge tracking current bytes waiting in queue. *Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.", ms.At(i).Description()) assert.Equal(t, "By", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -292,6 +615,21 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("splunk.queue.name") assert.True(t, ok) assert.EqualValues(t, "splunk.queue.name-val", attrVal.Str()) + case "splunk.typing.queue.ratio": + assert.False(t, validatedMetrics["splunk.typing.queue.ratio"], "Found a duplicate in the metrics slice: splunk.typing.queue.ratio") + validatedMetrics["splunk.typing.queue.ratio"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the average indexer typing queue ration (%). 
*Note:** Search is best run against a Cluster Manager.", ms.At(i).Description()) + assert.Equal(t, "{%}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("splunk.host") + assert.True(t, ok) + assert.EqualValues(t, "splunk.host-val", attrVal.Str()) } } }) diff --git a/receiver/splunkenterprisereceiver/internal/metadata/testdata/config.yaml b/receiver/splunkenterprisereceiver/internal/metadata/testdata/config.yaml index dd3a85680e84..44ad32cb2a7e 100644 --- a/receiver/splunkenterprisereceiver/internal/metadata/testdata/config.yaml +++ b/receiver/splunkenterprisereceiver/internal/metadata/testdata/config.yaml @@ -1,6 +1,10 @@ default: all_set: metrics: + splunk.aggregation.queue.ratio: + enabled: true + splunk.buckets.searchable.status: + enabled: true splunk.data.indexes.extended.bucket.count: enabled: true splunk.data.indexes.extended.bucket.event.count: @@ -15,16 +19,52 @@ all_set: enabled: true splunk.data.indexes.extended.total.size: enabled: true + splunk.indexer.avg.rate: + enabled: true + splunk.indexer.cpu.time: + enabled: true + splunk.indexer.queue.ratio: + enabled: true + splunk.indexer.raw.write.time: + enabled: true splunk.indexer.throughput: enabled: true + splunk.indexes.avg.size: + enabled: true + splunk.indexes.avg.usage: + enabled: true + splunk.indexes.bucket.count: + enabled: true + splunk.indexes.median.data.age: + enabled: true + splunk.indexes.size: + enabled: true + splunk.io.avg.iops: + enabled: true splunk.license.index.usage: enabled: true + splunk.parse.queue.ratio: + enabled: true + splunk.pipeline.set.count: + enabled: true + splunk.scheduler.avg.execution.latency: + enabled: true + splunk.scheduler.avg.run.time: + enabled: true + splunk.scheduler.completion.ratio: + enabled: true splunk.server.introspection.queues.current: enabled: true splunk.server.introspection.queues.current.bytes: enabled: true + splunk.typing.queue.ratio: + enabled: true none_set: metrics: + splunk.aggregation.queue.ratio: + enabled: false + splunk.buckets.searchable.status: + enabled: false splunk.data.indexes.extended.bucket.count: enabled: false splunk.data.indexes.extended.bucket.event.count: @@ -39,11 +79,43 @@ none_set: enabled: false splunk.data.indexes.extended.total.size: enabled: false + splunk.indexer.avg.rate: + enabled: false + splunk.indexer.cpu.time: + enabled: false + splunk.indexer.queue.ratio: + enabled: false + splunk.indexer.raw.write.time: + enabled: false splunk.indexer.throughput: enabled: false + splunk.indexes.avg.size: + enabled: false + splunk.indexes.avg.usage: + enabled: false + splunk.indexes.bucket.count: + enabled: false + splunk.indexes.median.data.age: + enabled: false + splunk.indexes.size: + enabled: false + splunk.io.avg.iops: + enabled: false splunk.license.index.usage: enabled: false + splunk.parse.queue.ratio: + enabled: false + splunk.pipeline.set.count: + enabled: false + splunk.scheduler.avg.execution.latency: + enabled: false + splunk.scheduler.avg.run.time: + enabled: false + splunk.scheduler.completion.ratio: + enabled: false splunk.server.introspection.queues.current: enabled: false splunk.server.introspection.queues.current.bytes: enabled: false + splunk.typing.queue.ratio: + enabled: false diff --git a/receiver/splunkenterprisereceiver/metadata.yaml 
diff --git a/receiver/splunkenterprisereceiver/metadata.yaml b/receiver/splunkenterprisereceiver/metadata.yaml
index 8666de0743fb..cfaa9efef72f 100644
--- a/receiver/splunkenterprisereceiver/metadata.yaml
+++ b/receiver/splunkenterprisereceiver/metadata.yaml
@@ -9,12 +9,18 @@ status:
     active: [shalper2, MovieStoreGuy, greatestusername]
 
 attributes:
+  splunk.host:
+    description: The name of the splunk host
+    type: string
   splunk.index.name:
    description: The name of the index reporting a specific KPI
     type: string
   splunk.indexer.status:
     description: The status message reported for a specific object
     type: string
+  splunk.indexer.searchable:
+    description: The searchability status reported for a specific object
+    type: string
   splunk.bucket.dir:
     description: The bucket super-directory (home, cold, thawed) for each index
     type: string
@@ -30,10 +36,137 @@ metrics:
     gauge:
       value_type: int
     attributes: [splunk.index.name]
+  splunk.scheduler.avg.execution.latency:
+    enabled: true
+    description: Gauge tracking the average execution latency of scheduled searches
+    unit: '{ms}'
+    gauge:
+      value_type: double
+    attributes: [splunk.host]
+  splunk.scheduler.completion.ratio:
+    enabled: true
+    description: Gauge tracking the ratio of completed to skipped scheduled searches
+    unit: '{%}'
+    gauge:
+      value_type: double
+    attributes: [splunk.host]
+  splunk.indexer.avg.rate:
+    enabled: true
+    description: Gauge tracking the average rate of indexed data. **Note:** Search is best run against a Cluster Manager.
+    unit: KBy
+    gauge:
+      value_type: double
+    attributes: [splunk.host]
+  splunk.pipeline.set.count:
+    enabled: true
+    description: Gauge tracking the number of pipeline sets per indexer. **Note:** Search is best run against a Cluster Manager.
+    unit: '{count}'
+    gauge:
+      value_type: int
+    attributes: [splunk.host]
+  splunk.parse.queue.ratio:
+    enabled: true
+    description: Gauge tracking the average indexer parser queue ratio (%). **Note:** Search is best run against a Cluster Manager.
+    unit: '{%}'
+    gauge:
+      value_type: double
+    attributes: [splunk.host]
+  splunk.aggregation.queue.ratio:
+    enabled: true
+    description: Gauge tracking the average indexer aggregation queue ratio (%). **Note:** Search is best run against a Cluster Manager.
+    unit: '{%}'
+    gauge:
+      value_type: double
+    attributes: [splunk.host]
+  splunk.typing.queue.ratio:
+    enabled: true
+    description: Gauge tracking the average indexer typing queue ratio (%). **Note:** Search is best run against a Cluster Manager.
+    unit: '{%}'
+    gauge:
+      value_type: double
+    attributes: [splunk.host]
+  splunk.indexer.queue.ratio:
+    enabled: true
+    description: Gauge tracking the average indexer index queue ratio (%). **Note:** Search is best run against a Cluster Manager.
+    unit: '{%}'
+    gauge:
+      value_type: double
+    attributes: [splunk.host]
+  splunk.scheduler.avg.run.time:
+    enabled: true
+    description: Gauge tracking the average runtime of scheduled searches
+    unit: '{ms}'
+    gauge:
+      value_type: double
+    attributes: [splunk.host]
+  splunk.indexer.raw.write.time:
+    enabled: true
+    description: Gauge tracking the number of raw write seconds per instance
+    unit: '{s}'
+    gauge:
+      value_type: double
+    attributes: [splunk.host]
+  splunk.indexer.cpu.time:
+    enabled: true
+    description: Gauge tracking the number of indexing process cpu seconds per instance
+    unit: '{s}'
+    gauge:
+      value_type: double
+    attributes: [splunk.host]
+  splunk.io.avg.iops:
+    enabled: true
+    description: Gauge tracking the average IOPs used per instance
+    unit: '{iops}'
+    gauge:
+      value_type: int
+    attributes: [splunk.host]
+  splunk.buckets.searchable.status:
+    enabled: true
+    description: Gauge tracking the number of buckets and their searchable status. **Note:** Search is best run against a Cluster Manager.
+    unit: '{count}'
+    gauge:
+      value_type: int
+    attributes: [splunk.host, splunk.indexer.searchable]
+  splunk.indexes.bucket.count:
+    enabled: true
+    description: Gauge tracking the indexes and their bucket counts. **Note:** Search is best run against a Cluster Manager.
+    unit: '{count}'
+    gauge:
+      value_type: int
+    attributes: [splunk.index.name]
+  splunk.indexes.size:
+    enabled: true
+    description: Gauge tracking the indexes and their total size (gb). **Note:** Search is best run against a Cluster Manager.
+    unit: Gb
+    gauge:
+      value_type: double
+    attributes: [splunk.index.name]
+  splunk.indexes.avg.size:
+    enabled: true
+    description: Gauge tracking the indexes and their average size (gb). **Note:** Search is best run against a Cluster Manager.
+    unit: Gb
+    gauge:
+      value_type: double
+    attributes: [splunk.index.name]
+  splunk.indexes.avg.usage:
+    enabled: true
+    description: Gauge tracking the indexes and their average usage (%). **Note:** Search is best run against a Cluster Manager.
+    unit: '{%}'
+    gauge:
+      value_type: double
+    attributes: [splunk.index.name]
+  splunk.indexes.median.data.age:
+    enabled: true
+    description: Gauge tracking the indexes and their median data age (days). **Note:** Search is best run against a Cluster Manager.
+    unit: '{days}'
+    gauge:
+      value_type: int
+    attributes: [splunk.index.name]
 # 'services/server/introspection/indexer'
   splunk.indexer.throughput:
-    enabled: true
-    description: Gauge tracking average bytes per second throughput of indexer
+    enabled: false
+    description: Gauge tracking average bytes per second throughput of indexer. **Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.
     unit: By/s
     gauge:
       value_type: double
@@ -41,29 +174,29 @@ metrics:
     attributes: [splunk.indexer.status]
 # 'services/data/indexes-extended'
   splunk.data.indexes.extended.total.size:
-    enabled: true
-    description: Size in bytes on disk of this index
+    enabled: false
+    description: Size in bytes on disk of this index. **Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.
     unit: By
     gauge:
       value_type: int
     attributes: [splunk.index.name]
   splunk.data.indexes.extended.event.count:
-    enabled: true
-    description: Count of events for index, excluding frozen events. Approximately equal to the event_count sum of all buckets.
+    enabled: false
+    description: Count of events for index, excluding frozen events. Approximately equal to the event_count sum of all buckets. **Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.
     unit: '{events}'
     gauge:
       value_type: int
     attributes: [splunk.index.name]
   splunk.data.indexes.extended.bucket.count:
-    enabled: true
+    enabled: false
     description: Count of buckets per index
     unit: '{buckets}'
     gauge:
       value_type: int
     attributes: [splunk.index.name]
   splunk.data.indexes.extended.raw.size:
-    enabled: true
-    description: Size in bytes on disk of the /rawdata/ directories of all buckets in this index, excluding frozen
+    enabled: false
+    description: Size in bytes on disk of the /rawdata/ directories of all buckets in this index, excluding frozen. **Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.
     unit: By
     gauge:
       value_type: int
@@ -71,40 +204,38 @@ metrics:
 ## Broken down `bucket_dirs`
   splunk.data.indexes.extended.bucket.event.count:
     enabled: false
-    description: Count of events in this bucket super-directory
+    description: Count of events in this bucket super-directory. **Note:** Must be pointed at specific indexer `endpoint`.
     unit: '{events}'
     gauge:
       value_type: int
     attributes: [splunk.index.name, splunk.bucket.dir]
   splunk.data.indexes.extended.bucket.hot.count:
     enabled: false
-    description: (If size > 0) Number of hot buckets
+    description: (If size > 0) Number of hot buckets. **Note:** Must be pointed at specific indexer `endpoint`.
     unit: '{buckets}'
     gauge:
       value_type: int
     attributes: [splunk.index.name, splunk.bucket.dir]
   splunk.data.indexes.extended.bucket.warm.count:
     enabled: false
-    description: (If size > 0) Number of warm buckets
+    description: (If size > 0) Number of warm buckets. **Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.
     unit: '{buckets}'
     gauge:
       value_type: int
     attributes: [splunk.index.name, splunk.bucket.dir]
 #'services/server/introspection/queues'
   splunk.server.introspection.queues.current:
-    enabled: true
-    description: Gauge tracking current length of queue
+    enabled: false
+    description: Gauge tracking current length of queue. **Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.
     unit: '{queues}'
     gauge:
       value_type: int
     attributes: [splunk.queue.name]
   splunk.server.introspection.queues.current.bytes:
-    enabled: true
-    description: Gauge tracking current bytes waiting in queue
+    enabled: false
+    description: Gauge tracking current bytes waiting in queue. **Note:** Must be pointed at specific indexer `endpoint` and gathers metrics from only that indexer.
     unit: By
     gauge:
       value_type: int
     attributes: [splunk.queue.name]
-tests:
-  config:
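With this change the endpoint-scoped metrics default to `enabled: false`, while the new ad-hoc search metrics default to `enabled: true`. A sketch of how a user could re-enable a disabled metric in their collector configuration; the `metrics:` map is the standard mdatagen toggle, but the connection settings shown are placeholders rather than part of this patch:

```yaml
receivers:
  splunkenterprise:
    # Placeholder connection settings; see the receiver README for the
    # actual endpoint/auth fields your deployment requires.
    endpoint: https://my-indexer.example.com:8089
    metrics:
      # Re-enable two of the per-indexer API metrics disabled by default here.
      splunk.data.indexes.extended.total.size:
        enabled: true
      splunk.server.introspection.queues.current:
        enabled: true
```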
diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go
index 8cbe50cbb7ec..59a2b5bdb6fe 100644
--- a/receiver/splunkenterprisereceiver/scraper.go
+++ b/receiver/splunkenterprisereceiver/scraper.go
@@ -58,6 +58,13 @@ func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) {
 	now := pcommon.NewTimestampFromTime(time.Now())
 
 	s.scrapeLicenseUsageByIndex(ctx, now, errs)
+	s.scrapeAvgExecLatencyByHost(ctx, now, errs)
+	s.scrapeSchedulerCompletionRatioByHost(ctx, now, errs)
+	s.scrapeIndexerAvgRate(ctx, now, errs)
+	s.scrapeSchedulerRunTimeByHost(ctx, now, errs)
+	s.scrapeIndexerRawWriteSecondsByHost(ctx, now, errs)
+	s.scrapeIndexerCPUSecondsByHost(ctx, now, errs)
+	s.scrapeAvgIopsByHost(ctx, now, errs)
 	s.scrapeIndexThroughput(ctx, now, errs)
 	s.scrapeIndexesTotalSize(ctx, now, errs)
 	s.scrapeIndexesEventCount(ctx, now, errs)
@@ -67,6 +74,9 @@ func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) {
 	s.scrapeIndexesBucketHotWarmCount(ctx, now, errs)
 	s.scrapeIntrospectionQueues(ctx, now, errs)
 	s.scrapeIntrospectionQueuesBytes(ctx, now, errs)
+	s.scrapeIndexerPipelineQueues(ctx, now, errs)
+	s.scrapeBucketsSearchableStatus(ctx, now, errs)
+	s.scrapeIndexesBucketCountAdHoc(ctx, now, errs)
 
 	return s.mb.Emit(), errs.Combine()
 }
@@ -145,6 +155,869 @@ func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcomm
 	}
 }
 
+func (s *splunkScraper) scrapeAvgExecLatencyByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) {
+	var sr searchResponse
+	// Because we have to utilize network resources for each KPI we should check that each metric
+	// is enabled before proceeding
+	if !s.conf.MetricsBuilderConfig.Metrics.SplunkSchedulerAvgExecutionLatency.Enabled {
+		return
+	}
+
+	sr = searchResponse{
+		search: searchDict[`SplunkSchedulerAvgExecLatencySearch`],
+	}
+
+	var (
+		req *http.Request
+		res *http.Response
+		err error
+	)
+
+	start := time.Now()
+
+	for {
+		req, err = s.splunkClient.createRequest(ctx, &sr)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		res, err = s.splunkClient.makeRequest(req)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		// if it's a 204 the body will be empty because we are still waiting on search results
+		err = unmarshallSearchReq(res, &sr)
+		if err != nil {
+			errs.Add(err)
+		}
+		res.Body.Close()
+
+		// if no errors occurred and a 200 was returned, the scrape succeeded, so stop polling. Note we must
+		// make sure the 200 comes after the first request, which provides the jobId used to retrieve results
+		if sr.Return == 200 && sr.Jobid != nil {
+			break
+		}
+
+		if sr.Return == 204 {
+			time.Sleep(2 * time.Second)
+		}
+
+		if sr.Return == 400 {
+			break
+		}
+
+		if time.Since(start) > s.conf.ScraperControllerSettings.Timeout {
+			errs.Add(errMaxSearchWaitTimeExceeded)
+			return
+		}
+	}
+
+	// Record the results
+	var host string
+	for _, f := range sr.Fields {
+		switch fieldName := f.FieldName; fieldName {
+		case "host":
+			host = f.Value
+			continue
+		case "latency_avg_exec":
+			v, err := strconv.ParseFloat(f.Value, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			s.mb.RecordSplunkSchedulerAvgExecutionLatencyDataPoint(now, v, host)
+		}
+	}
+}
+
+func (s *splunkScraper) scrapeIndexerAvgRate(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) {
+	var sr searchResponse
+	// Because we have to utilize network resources for each KPI we should check that each metric
+	// is enabled before proceeding
+	if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerAvgRate.Enabled {
+		return
+	}
+
+	sr = searchResponse{
+		search: searchDict[`SplunkIndexerAvgRate`],
+	}
+
+	var (
+		req *http.Request
+		res *http.Response
+		err error
+	)
+
+	start := time.Now()
+
+	for {
+		req, err = s.splunkClient.createRequest(ctx, &sr)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		res, err = s.splunkClient.makeRequest(req)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		// if it's a 204 the body will be empty because we are still waiting on search results
+		err = unmarshallSearchReq(res, &sr)
+		if err != nil {
+			errs.Add(err)
+		}
+		res.Body.Close()
+
+		// if no errors occurred and a 200 was returned, the scrape succeeded, so stop polling. Note we must
+		// make sure the 200 comes after the first request, which provides the jobId used to retrieve results
+		if sr.Return == 200 && sr.Jobid != nil {
+			break
+		}
+
+		if sr.Return == 200 {
+			break
+		}
+
+		if sr.Return == 204 {
+			time.Sleep(2 * time.Second)
+		}
+
+		if sr.Return == 400 {
+			break
+		}
+
+		if time.Since(start) > s.conf.ScraperControllerSettings.Timeout {
+			errs.Add(errMaxSearchWaitTimeExceeded)
+			return
+		}
+	}
+	// Record the results
+	var host string
+	for _, f := range sr.Fields {
+		switch fieldName := f.FieldName; fieldName {
+		case "host":
+			host = f.Value
+			continue
+		case "indexer_avg_kbps":
+			v, err := strconv.ParseFloat(f.Value, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			s.mb.RecordSplunkIndexerAvgRateDataPoint(now, v, host)
+		}
+	}
+}
+
+func (s *splunkScraper) scrapeIndexerPipelineQueues(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) {
+	var sr searchResponse
+	// Because we have to utilize network resources for each KPI we should check that each metric
+	// is enabled before proceeding
+	if !s.conf.MetricsBuilderConfig.Metrics.SplunkAggregationQueueRatio.Enabled {
+		return
+	}
+
+	sr = searchResponse{
+		search: searchDict[`SplunkPipelineQueues`],
+	}
+
+	var (
+		req *http.Request
+		res *http.Response
+		err error
+	)
+
+	start := time.Now()
+
+	for {
+		req, err = s.splunkClient.createRequest(ctx, &sr)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		res, err = s.splunkClient.makeRequest(req)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		// if it's a 204 the body will be empty because we are still waiting on search results
+		err = unmarshallSearchReq(res, &sr)
+		if err != nil {
+			errs.Add(err)
+		}
+		res.Body.Close()
+
+		// if no errors occurred and a 200 was returned, the scrape succeeded, so stop polling. Note we must
+		// make sure the 200 comes after the first request, which provides the jobId used to retrieve results
+		if sr.Return == 200 && sr.Jobid != nil {
+			break
+		}
+
+		if sr.Return == 200 {
+			break
+		}
+
+		if sr.Return == 204 {
+			time.Sleep(2 * time.Second)
+		}
+
+		if sr.Return == 400 {
+			break
+		}
+
+		if time.Since(start) > s.conf.ScraperControllerSettings.Timeout {
+			errs.Add(errMaxSearchWaitTimeExceeded)
+			return
+		}
+	}
+	// Record the results
+	var host string
+	var ps int64
+	for _, f := range sr.Fields {
+		switch fieldName := f.FieldName; fieldName {
+		case "host":
+			host = f.Value
+			continue
+		case "agg_queue_ratio":
+			v, err := strconv.ParseFloat(f.Value, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			s.mb.RecordSplunkAggregationQueueRatioDataPoint(now, v, host)
+		case "index_queue_ratio":
+			v, err := strconv.ParseFloat(f.Value, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			s.mb.RecordSplunkIndexerQueueRatioDataPoint(now, v, host)
+		case "parse_queue_ratio":
+			v, err := strconv.ParseFloat(f.Value, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			s.mb.RecordSplunkParseQueueRatioDataPoint(now, v, host)
+		case "pipeline_sets":
+			v, err := strconv.ParseInt(f.Value, 10, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			ps = v
+			s.mb.RecordSplunkPipelineSetCountDataPoint(now, ps, host)
+		case "typing_queue_ratio":
+			v, err := strconv.ParseFloat(f.Value, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			s.mb.RecordSplunkTypingQueueRatioDataPoint(now, v, host)
+		}
+	}
+}
+
+func (s *splunkScraper) scrapeBucketsSearchableStatus(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) {
+	var sr searchResponse
+	// Because we have to utilize network resources for each KPI we should check that each metric
+	// is enabled before proceeding
+	if !s.conf.MetricsBuilderConfig.Metrics.SplunkBucketsSearchableStatus.Enabled {
+		return
+	}
+
+	sr = searchResponse{
+		search: searchDict[`SplunkBucketsSearchableStatus`],
+	}
+
+	var (
+		req *http.Request
+		res *http.Response
+		err error
+	)
+
+	start := time.Now()
+
+	for {
+		req, err = s.splunkClient.createRequest(ctx, &sr)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		res, err = s.splunkClient.makeRequest(req)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		// if it's a 204 the body will be empty because we are still waiting on search results
+		err = unmarshallSearchReq(res, &sr)
+		if err != nil {
+			errs.Add(err)
+		}
+		res.Body.Close()
+
+		// if no errors occurred and a 200 was returned, the scrape succeeded, so stop polling. Note we must
+		// make sure the 200 comes after the first request, which provides the jobId used to retrieve results
+		if sr.Return == 200 && sr.Jobid != nil {
+			break
+		}
+
+		if sr.Return == 200 {
+			break
+		}
+
+		if sr.Return == 204 {
+			time.Sleep(2 * time.Second)
+		}
+
+		if sr.Return == 400 {
+			break
+		}
+
+		if time.Since(start) > s.conf.ScraperControllerSettings.Timeout {
+			errs.Add(errMaxSearchWaitTimeExceeded)
+			return
+		}
+	}
+	// Record the results
+	var host string
+	var searchable string
+	var bc int64
+	for _, f := range sr.Fields {
+		switch fieldName := f.FieldName; fieldName {
+		case "host":
+			host = f.Value
+			continue
+		case "is_searchable":
+			searchable = f.Value
+			continue
+		case "bucket_count":
+			v, err := strconv.ParseInt(f.Value, 10, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			bc = v
+			s.mb.RecordSplunkBucketsSearchableStatusDataPoint(now, bc, host, searchable)
+		}
+	}
+}
+
+func (s *splunkScraper) scrapeIndexesBucketCountAdHoc(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) {
+	var sr searchResponse
+	// Because we have to utilize network resources for each KPI we should check that each metric
+	// is enabled before proceeding
+	if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexesSize.Enabled {
+		return
+	}
+
+	sr = searchResponse{
+		search: searchDict[`SplunkIndexesData`],
+	}
+
+	var (
+		req *http.Request
+		res *http.Response
+		err error
+	)
+
+	start := time.Now()
+
+	for {
+		req, err = s.splunkClient.createRequest(ctx, &sr)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		res, err = s.splunkClient.makeRequest(req)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		// if it's a 204 the body will be empty because we are still waiting on search results
+		err = unmarshallSearchReq(res, &sr)
+		if err != nil {
+			errs.Add(err)
+		}
+		res.Body.Close()
+
+		// if no errors occurred and a 200 was returned, the scrape succeeded, so stop polling. Note we must
+		// make sure the 200 comes after the first request, which provides the jobId used to retrieve results
+		if sr.Return == 200 && sr.Jobid != nil {
+			break
+		}
+
+		if sr.Return == 200 {
+			break
+		}
+
+		if sr.Return == 204 {
+			time.Sleep(2 * time.Second)
+		}
+
+		if sr.Return == 400 {
+			break
+		}
+
+		if time.Since(start) > s.conf.ScraperControllerSettings.Timeout {
+			errs.Add(errMaxSearchWaitTimeExceeded)
+			return
+		}
+	}
+	// Record the results
+	var indexer string
+	var bc int64
+	for _, f := range sr.Fields {
+		switch fieldName := f.FieldName; fieldName {
+		case "title":
+			indexer = f.Value
+			continue
+		case "total_size_gb":
+			v, err := strconv.ParseFloat(f.Value, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			s.mb.RecordSplunkIndexesSizeDataPoint(now, v, indexer)
+		case "average_size_gb":
+			v, err := strconv.ParseFloat(f.Value, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			s.mb.RecordSplunkIndexesAvgSizeDataPoint(now, v, indexer)
+		case "average_usage_perc":
+			v, err := strconv.ParseFloat(f.Value, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			s.mb.RecordSplunkIndexesAvgUsageDataPoint(now, v, indexer)
+		case "median_data_age":
+			v, err := strconv.ParseInt(f.Value, 10, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			bc = v
+			s.mb.RecordSplunkIndexesMedianDataAgeDataPoint(now, bc, indexer)
+		case "bucket_count":
+			v, err := strconv.ParseInt(f.Value, 10, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			bc = v
+			s.mb.RecordSplunkIndexesBucketCountDataPoint(now, bc, indexer)
+		}
+	}
+}
+
+func (s *splunkScraper) scrapeSchedulerCompletionRatioByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) {
+	var sr searchResponse
+	// Because we have to utilize network resources for each KPI we should check that each metric
+	// is enabled before proceeding
+	if !s.conf.MetricsBuilderConfig.Metrics.SplunkSchedulerCompletionRatio.Enabled {
+		return
+	}
+
+	sr = searchResponse{
+		search: searchDict[`SplunkSchedulerCompletionRatio`],
+	}
+
+	var (
+		req *http.Request
+		res *http.Response
+		err error
+	)
+
+	start := time.Now()
+
+	for {
+		req, err = s.splunkClient.createRequest(ctx, &sr)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		res, err = s.splunkClient.makeRequest(req)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		// if it's a 204 the body will be empty because we are still waiting on search results
+		err = unmarshallSearchReq(res, &sr)
+		if err != nil {
+			errs.Add(err)
+		}
+		res.Body.Close()
+
+		// if no errors occurred and a 200 was returned, the scrape succeeded, so stop polling. Note we must
+		// make sure the 200 comes after the first request, which provides the jobId used to retrieve results
+		if sr.Return == 200 && sr.Jobid != nil {
+			break
+		}
+
+		if sr.Return == 204 {
+			time.Sleep(2 * time.Second)
+		}
+
+		if sr.Return == 400 {
+			break
+		}
+
+		if time.Since(start) > s.conf.ScraperControllerSettings.Timeout {
+			errs.Add(errMaxSearchWaitTimeExceeded)
+			return
+		}
+	}
+
+	// Record the results
+	var host string
+	for _, f := range sr.Fields {
+		switch fieldName := f.FieldName; fieldName {
+		case "host":
+			host = f.Value
+			continue
+		case "completion_ratio":
+			v, err := strconv.ParseFloat(f.Value, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			s.mb.RecordSplunkSchedulerCompletionRatioDataPoint(now, v, host)
+		}
+	}
+}
+
+func (s *splunkScraper) scrapeIndexerRawWriteSecondsByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) {
+	var sr searchResponse
+	// Because we have to utilize network resources for each KPI we should check that each metric
+	// is enabled before proceeding
+	if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerRawWriteTime.Enabled {
+		return
+	}
+
+	sr = searchResponse{
+		search: searchDict[`SplunkIndexerRawWriteSeconds`],
+	}
+
+	var (
+		req *http.Request
+		res *http.Response
+		err error
+	)
+
+	start := time.Now()
+
+	for {
+		req, err = s.splunkClient.createRequest(ctx, &sr)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		res, err = s.splunkClient.makeRequest(req)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		// if it's a 204 the body will be empty because we are still waiting on search results
+		err = unmarshallSearchReq(res, &sr)
+		if err != nil {
+			errs.Add(err)
+		}
+		res.Body.Close()
+
+		// if no errors occurred and a 200 was returned, the scrape succeeded, so stop polling. Note we must
+		// make sure the 200 comes after the first request, which provides the jobId used to retrieve results
+		if sr.Return == 200 && sr.Jobid != nil {
+			break
+		}
+
+		if sr.Return == 204 {
+			time.Sleep(2 * time.Second)
+		}
+
+		if sr.Return == 400 {
+			break
+		}
+
+		if time.Since(start) > s.conf.ScraperControllerSettings.Timeout {
+			errs.Add(errMaxSearchWaitTimeExceeded)
+			return
+		}
+	}
+
+	// Record the results
+	var host string
+	for _, f := range sr.Fields {
+		switch fieldName := f.FieldName; fieldName {
+		case "host":
+			host = f.Value
+			continue
+		case "raw_data_write_seconds":
+			v, err := strconv.ParseFloat(f.Value, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			s.mb.RecordSplunkIndexerRawWriteTimeDataPoint(now, v, host)
+		}
+	}
+}
+
+func (s *splunkScraper) scrapeIndexerCPUSecondsByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) {
+	var sr searchResponse
+	// Because we have to utilize network resources for each KPI we should check that each metric
+	// is enabled before proceeding
+	if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerCPUTime.Enabled {
+		return
+	}
+
+	sr = searchResponse{
+		search: searchDict[`SplunkIndexerCpuSeconds`],
+	}
+
+	var (
+		req *http.Request
+		res *http.Response
+		err error
+	)
+
+	start := time.Now()
+
+	for {
+		req, err = s.splunkClient.createRequest(ctx, &sr)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		res, err = s.splunkClient.makeRequest(req)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		// if it's a 204 the body will be empty because we are still waiting on search results
+		err = unmarshallSearchReq(res, &sr)
+		if err != nil {
+			errs.Add(err)
+		}
+		res.Body.Close()
+
+		// if no errors occurred and a 200 was returned, the scrape succeeded, so stop polling. Note we must
+		// make sure the 200 comes after the first request, which provides the jobId used to retrieve results
+		if sr.Return == 200 && sr.Jobid != nil {
+			break
+		}
+
+		if sr.Return == 204 {
+			time.Sleep(2 * time.Second)
+		}
+
+		if sr.Return == 400 {
+			break
+		}
+
+		if time.Since(start) > s.conf.ScraperControllerSettings.Timeout {
+			errs.Add(errMaxSearchWaitTimeExceeded)
+			return
+		}
+	}
+
+	// Record the results
+	var host string
+	for _, f := range sr.Fields {
+		switch fieldName := f.FieldName; fieldName {
+		case "host":
+			host = f.Value
+			continue
+		case "service_cpu_seconds":
+			v, err := strconv.ParseFloat(f.Value, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			s.mb.RecordSplunkIndexerCPUTimeDataPoint(now, v, host)
+		}
+	}
+}
+
+func (s *splunkScraper) scrapeAvgIopsByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) {
+	var sr searchResponse
+	// Because we have to utilize network resources for each KPI we should check that each metric
+	// is enabled before proceeding
+	if !s.conf.MetricsBuilderConfig.Metrics.SplunkIoAvgIops.Enabled {
+		return
+	}
+
+	sr = searchResponse{
+		search: searchDict[`SplunkIoAvgIops`],
+	}
+
+	var (
+		req *http.Request
+		res *http.Response
+		err error
+	)
+
+	start := time.Now()
+
+	for {
+		req, err = s.splunkClient.createRequest(ctx, &sr)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		res, err = s.splunkClient.makeRequest(req)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		// if it's a 204 the body will be empty because we are still waiting on search results
+		err = unmarshallSearchReq(res, &sr)
+		if err != nil {
+			errs.Add(err)
+		}
+		res.Body.Close()
+
+		// if no errors occurred and a 200 was returned, the scrape succeeded, so stop polling. Note we must
+		// make sure the 200 comes after the first request, which provides the jobId used to retrieve results
+		if sr.Return == 200 && sr.Jobid != nil {
+			break
+		}
+
+		if sr.Return == 204 {
+			time.Sleep(2 * time.Second)
+		}
+
+		if sr.Return == 400 {
+			break
+		}
+
+		if time.Since(start) > s.conf.ScraperControllerSettings.Timeout {
+			errs.Add(errMaxSearchWaitTimeExceeded)
+			return
+		}
+	}
+
+	// Record the results
+	var host string
+	for _, f := range sr.Fields {
+		switch fieldName := f.FieldName; fieldName {
+		case "host":
+			host = f.Value
+			continue
+		case "iops":
+			v, err := strconv.ParseInt(f.Value, 10, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			s.mb.RecordSplunkIoAvgIopsDataPoint(now, v, host)
+		}
+	}
+}
+
+func (s *splunkScraper) scrapeSchedulerRunTimeByHost(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) {
+	var sr searchResponse
+	// Because we have to utilize network resources for each KPI we should check that each metric
+	// is enabled before proceeding
+	if !s.conf.MetricsBuilderConfig.Metrics.SplunkSchedulerAvgRunTime.Enabled {
+		return
+	}
+
+	sr = searchResponse{
+		search: searchDict[`SplunkSchedulerAvgRunTime`],
+	}
+
+	var (
+		req *http.Request
+		res *http.Response
+		err error
+	)
+
+	start := time.Now()
+
+	for {
+		req, err = s.splunkClient.createRequest(ctx, &sr)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		res, err = s.splunkClient.makeRequest(req)
+		if err != nil {
+			errs.Add(err)
+			return
+		}
+
+		// if it's a 204 the body will be empty because we are still waiting on search results
+		err = unmarshallSearchReq(res, &sr)
+		if err != nil {
+			errs.Add(err)
+		}
+		res.Body.Close()
+
+		// if no errors occurred and a 200 was returned, the scrape succeeded, so stop polling. Note we must
+		// make sure the 200 comes after the first request, which provides the jobId used to retrieve results
+		if sr.Return == 200 && sr.Jobid != nil {
+			break
+		}
+
+		if sr.Return == 204 {
+			time.Sleep(2 * time.Second)
+		}
+
+		if sr.Return == 400 {
+			break
+		}
+
+		if time.Since(start) > s.conf.ScraperControllerSettings.Timeout {
+			errs.Add(errMaxSearchWaitTimeExceeded)
+			return
+		}
+	}
+
+	// Record the results
+	var host string
+	for _, f := range sr.Fields {
+		switch fieldName := f.FieldName; fieldName {
+		case "host":
+			host = f.Value
+			continue
+		case "run_time_avg":
+			v, err := strconv.ParseFloat(f.Value, 64)
+			if err != nil {
+				errs.Add(err)
+				continue
+			}
+			s.mb.RecordSplunkSchedulerAvgRunTimeDataPoint(now, v, host)
+		}
+	}
+}
+
 // Helper function for unmarshaling search endpoint requests
 func unmarshallSearchReq(res *http.Response, sr *searchResponse) error {
 	sr.Return = res.StatusCode
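All ten scrape functions above share one polling protocol against the Splunk search jobs API: the first request dispatches the ad-hoc search and yields a job ID, a 204 means the job is still running, a 200 that carries the job ID means results are ready, and a 400 ends polling early. A condensed, non-authoritative sketch of that loop, with a hypothetical `do` callback standing in for the receiver's createRequest/makeRequest/unmarshallSearchReq plumbing:

```go
package splunksketch

import (
	"context"
	"errors"
	"net/http"
	"time"
)

var errSearchWaitExceeded = errors.New("maximum search wait time exceeded")

// pollSearch keeps re-issuing the search request until results are ready,
// the search is rejected, or the scraper timeout elapses. do performs one
// request and reports the HTTP status plus the job ID, if the response
// carried one.
func pollSearch(ctx context.Context, timeout time.Duration,
	do func(context.Context) (status int, jobID *string, err error)) error {
	start := time.Now()
	for {
		status, jobID, err := do(ctx)
		if err != nil {
			return err
		}
		switch status {
		case http.StatusOK:
			// Only a 200 that follows the job-creating request (i.e. one
			// that carries a job ID) means the results body is populated.
			if jobID != nil {
				return nil
			}
		case http.StatusNoContent:
			time.Sleep(2 * time.Second) // job still running; back off and retry
		case http.StatusBadRequest:
			return nil // the receiver simply stops polling on a 400
		}
		if time.Since(start) > timeout {
			return errSearchWaitExceeded
		}
	}
}
```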
diff --git a/receiver/splunkenterprisereceiver/search_result.go b/receiver/splunkenterprisereceiver/search_result.go
index 4833e86cc3b2..7a40049ada71 100644
--- a/receiver/splunkenterprisereceiver/search_result.go
+++ b/receiver/splunkenterprisereceiver/search_result.go
@@ -5,7 +5,18 @@ package splunkenterprisereceiver // import "github.com/open-telemetry/openteleme
 
 // metric name and its associated search as a key value pair
 var searchDict = map[string]string{
-	`SplunkLicenseIndexUsageSearch`: `search=search index=_internal source=*license_usage.log type="Usage"| fields idx, b| eval indexname = if(len(idx)=0 OR isnull(idx),"(UNKNOWN)",idx)| stats sum(b) as b by indexname| eval By=round(b, 9)| fields indexname, By`,
+	`SplunkLicenseIndexUsageSearch`: `search=search earliest=-10m latest=now index=_internal source=*license_usage.log type="Usage"| fields idx, b| eval indexname = if(len(idx)=0 OR isnull(idx),"(UNKNOWN)",idx)| stats sum(b) as b by indexname| eval By=round(b, 9)| fields indexname, By`,
+	`SplunkIndexerAvgRate`: `search=search earliest=-10m latest=now index=_telemetry | stats count(index) | appendcols [| rest splunk_server_group=dmc_group_indexer splunk_server_group="dmc_group_indexer" /services/server/introspection/indexer | eval average_KBps = round(average_KBps, 0) | eval status = if((reason == ".") OR (reason == "") OR isnull(reason), status, status.": ".reason) | fields splunk_server, average_KBps, status] | eval host = splunk_server | stats avg(average_KBps) as "indexer_avg_kbps", values(status) as "status" by host | fields host, indexer_avg_kbps`,
+	`SplunkSchedulerAvgExecLatencySearch`: `search=search earliest=-10m latest=now index=_internal host=* sourcetype=scheduler (status="completed" OR status="skipped" OR status="deferred" OR status="success") | eval window_time = if(isnull('window_time'), 0, 'window_time') | eval execution_latency = max(0.00, ('dispatch_time' - (scheduled_time %2B window_time))) | stats avg(execution_latency) AS avg_exec_latency by host | eval host = if(isnull(host), "(UNKNOWN)", host) | eval latency_avg_exec = round(avg_exec_latency, 2) | fields host, latency_avg_exec`,
+	`SplunkSchedulerCompletionRatio`: `search=search earliest=-10m latest=now index=_internal host=* sourcetype=scheduler (status="completed" OR status="skipped" OR status="deferred" OR status="success") | stats count(eval(status=="completed" OR status=="skipped" OR status="success")) AS total_exec, count(eval(status=="skipped")) AS skipped_exec by host | eval completion_ratio = round((1-(skipped_exec / total_exec)) * 100, 2) | fields host, completion_ratio`,
+	`SplunkSchedulerAvgRunTime`: `search=search earliest=-10m latest=now index=_internal host=* sourcetype=scheduler (status="completed" OR status="skipped" OR status="deferred" OR status="success") | eval runTime = avg(run_time) | stats avg(runTime) AS runTime by host | eval host = if(isnull(host), "(UNKNOWN)", host) | eval run_time_avg = round(runTime, 2) | fields host, run_time_avg`,
+	`SplunkIndexerRawWriteSeconds`: `search=search earliest=-10m latest=now index=_internal host=* source=*metrics.log sourcetype=splunkd group=pipeline name=indexerpipe processor=indexer | eval ingest_pipe = if(isnotnull(ingest_pipe), ingest_pipe, "none") | search ingest_pipe=* | stats sum(write_cpu_seconds) AS "raw_data_write_seconds" by host | fields host, raw_data_write_seconds`,
+	`SplunkIndexerCpuSeconds`: `search=search earliest=-10m latest=now index=_internal host=* source=*metrics.log sourcetype=splunkd group=pipeline name=indexerpipe processor=indexer | eval ingest_pipe = if(isnotnull(ingest_pipe), ingest_pipe, "none") | search ingest_pipe=* | stats sum(service_cpu_seconds) AS "service_cpu_seconds" by host | fields host, service_cpu_seconds`,
+	`SplunkIoAvgIops`: `search=search earliest=-10m latest=now index=_introspection sourcetype=splunk_resource_usage component=IOStats host=* | eval mount_point = 'data.mount_point' | eval reads_ps = 'data.reads_ps' | eval writes_ps = 'data.writes_ps' | eval interval = 'data.interval' | eval total_io = reads_ps %2B writes_ps| eval op_count = (interval * total_io)| search data.mount_point="/opt/splunk/var" | stats avg(op_count) as iops by host| eval iops = round(iops) | fields host, iops`,
+	`SplunkPipelineQueues`: `search=search earliest=-10m latest=now index=_telemetry | stats count(index) | appendcols [| rest splunk_server_group=dmc_group_indexer splunk_server_group="dmc_group_indexer" /services/server/introspection/queues | search title=parsingQueue* OR title=aggQueue* OR title=typingQueue* OR title=indexQueue* | eval fill_perc=round(current_size_bytes / max_size_bytes * 100,2) | fields splunk_server, title, fill_perc | rex field=title %22%28%3F%3Cqueue_name%3E%5E%5Cw%2B%29%28%3F%3A%5C.%28%3F%3Cpipeline_number%3E%5Cd%2B%29%29%3F%22 | eval fill_perc = if(isnotnull(pipeline_number), "pset".pipeline_number.": ".fill_perc, fill_perc) | chart values(fill_perc) over splunk_server by queue_name | eval pset_count = mvcount(parsingQueue)] | eval host = splunk_server | stats sum(pset_count) as "pipeline_sets", sum(parsingQueue) as "parse_queue_ratio", sum(aggQueue) as "agg_queue_ratio", sum(typingQueue) as "typing_queue_ratio", sum(indexQueue) as "index_queue_ratio" by host | fields host, pipeline_sets, parse_queue_ratio, agg_queue_ratio, typing_queue_ratio, index_queue_ratio`,
+	`SplunkBucketsSearchableStatus`: `search=search earliest=-10m latest=now index=_telemetry | stats count(index) | appendcols [| rest splunk_server_group=dmc_group_cluster_master splunk_server_group=* /services/cluster/master/peers | eval splunk_server = label | fields splunk_server, label, is_searchable, status, site, bucket_count, host_port_pair, last_heartbeat, replication_port, base_generation_id, title, bucket_count_by_index.* | eval is_searchable = if(is_searchable == 1 or is_searchable == "1", "Yes", "No")] | sort - last_heartbeat | search label="***" | search is_searchable="*" | search status="*" | search site="*" | eval host = splunk_server | stats values(is_searchable) as is_searchable, values(status) as status, avg(bucket_count) as bucket_count by host | fields host, is_searchable, status, bucket_count`,
+	`SplunkIndexesData`: `search=search earliest=-10m latest=now index=_telemetry | stats count(index) | appendcols [| rest splunk_server_group=dmc_group_indexer splunk_server_group="*" /services/data/indexes] | join title splunk_server type=outer [ rest splunk_server_group=dmc_group_indexer splunk_server_group="*" /services/data/indexes-extended ] | eval elapsedTime = now() - strptime(minTime,"%25Y-%25m-%25dT%25H%3A%25M%3A%25S%25z") | eval dataAge = ceiling(elapsedTime / 86400) | eval indexSizeGB = if(currentDBSizeMB >= 1 AND totalEventCount >=1, currentDBSizeMB/1024, null()) | eval maxSizeGB = maxTotalDataSizeMB / 1024 | eval sizeUsagePerc = indexSizeGB / maxSizeGB * 100 | stats dc(splunk_server) AS splunk_server_count count(indexSizeGB) as "non_empty_instances" sum(indexSizeGB) AS total_size_gb avg(indexSizeGB) as average_size_gb avg(sizeUsagePerc) as average_usage_perc median(dataAge) as median_data_age max(dataAge) as oldest_data_age latest(bucket_dirs.home.warm_bucket_count) as warm_bucket_count latest(bucket_dirs.home.hot_bucket_count) as hot_bucket_count by title, datatype | eval warm_bucket_count = if(isnotnull(warm_bucket_count), warm_bucket_count, 0)| eval hot_bucket_count = if(isnotnull(hot_bucket_count), hot_bucket_count, 0)| eval bucket_count = (warm_bucket_count %2B hot_bucket_count)| eval total_size_gb = if(isnotnull(total_size_gb), round(total_size_gb, 2), 0) | eval average_size_gb = if(isnotnull(average_size_gb), round(average_size_gb, 2), 0) | eval average_usage_perc = if(isnotnull(average_usage_perc), round(average_usage_perc, 2), 0) | eval median_data_age = if(isNum(median_data_age), median_data_age, 0) | eval oldest_data_age = if(isNum(oldest_data_age), oldest_data_age, 0) | fields title splunk_server_count non_empty_instances total_size_gb average_size_gb average_usage_perc median_data_age bucket_count warm_bucket_count hot_bucket_count`,
+	`SplunkIndexesBucketCounts`: `search=search earliest=-10m latest=now index=_telemetry | stats count(index) | appendcols [| rest splunk_server_group=dmc_group_cluster_master splunk_server_group=* /services/cluster/master/indexes | fields title, is_searchable, replicated_copies_tracker*, searchable_copies_tracker*, num_buckets, index_size] | rename replicated_copies_tracker.*.* as rp**, searchable_copies_tracker.*.* as sb** | foreach rp0actual_copies_per_slot [ eval replicated_data_copies_ratio = ('rp0actual_copies_per_slot' / 'rp0expected_total_per_slot') ] | foreach sb0actual_copies_per_slot [ eval searchable_data_copies_ratio = ('sb0actual_copies_per_slot' / 'sb0expected_total_per_slot')] | eval is_searchable = if((is_searchable == 1) or (is_searchable == "1"), "Yes", "No") | eval index_size_gb = round(index_size / 1024 / 1024 / 1024, 2) | fields title, is_searchable, searchable_data_copies_ratio, replicated_data_copies_ratio, num_buckets, index_size_gb | search title="***" | search is_searchable="*" | stats latest(searchable_data_copies_ratio) as searchable_data_copies_ratio, latest(replicated_data_copies_ratio) as replicated_data_copies_ratio, latest(num_buckets) as num_buckets, latest(index_size_gb) as index_size_gb by title | fields title searchable_data_copies_ratio replicated_data_copies_ratio num_buckets index_size_gb`,
 }
 
 var apiDict = map[string]string{
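Note that the SPL bodies above are stored pre-encoded for use as `search=` form payloads: `%2B` is a literal `+` (a raw `+` would decode to a space), and the `%22`/`%3F`-style escapes protect the `rex` pattern. An illustrative way to read an entry back as plain SPL; the abbreviated query here is a stand-in, not one of the real dictionary values:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Decode a pre-encoded search string back into readable SPL.
	enc := `search=search earliest=-10m latest=now index=_introspection | eval total_io = reads_ps %2B writes_ps`
	dec, err := url.QueryUnescape(enc)
	if err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(dec) // prints the query with %2B restored to '+'
}
```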
diff --git a/receiver/splunkenterprisereceiver/testdata/scraper/expected.yaml b/receiver/splunkenterprisereceiver/testdata/scraper/expected.yaml
index b6b6ceca5b53..c7c1ea59a0d9 100644
--- a/receiver/splunkenterprisereceiver/testdata/scraper/expected.yaml
+++ b/receiver/splunkenterprisereceiver/testdata/scraper/expected.yaml
@@ -14,7 +14,7 @@ resourceMetrics:
                   timeUnixNano: "2000000"
             name: splunk.data.indexes.extended.bucket.count
             unit: '{buckets}'
-          - description: Count of events in this bucket super-directory
+          - description: Count of events in this bucket super-directory. **Note:** Must be pointed at a specific indexer `endpoint`.
             gauge:
               dataPoints:
                 - asInt: "107267027"
@@ -29,7 +29,7 @@
                   timeUnixNano: "2000000"
             name: splunk.data.indexes.extended.bucket.event.count
             unit: '{events}'
-          - description: (If size > 0) Number of hot buckets
+          - description: (If size > 0) Number of hot buckets. **Note:** Must be pointed at a specific indexer `endpoint`.
             gauge:
               dataPoints:
                 - asInt: "1"
@@ -44,7 +44,7 @@
                   timeUnixNano: "2000000"
             name: splunk.data.indexes.extended.bucket.hot.count
             unit: '{buckets}'
-          - description: (If size > 0) Number of warm buckets
+          - description: (If size > 0) Number of warm buckets. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.
             gauge:
               dataPoints:
                 - asInt: "50"
@@ -59,7 +59,7 @@
                   timeUnixNano: "2000000"
             name: splunk.data.indexes.extended.bucket.warm.count
             unit: '{buckets}'
-          - description: Count of events for index, excluding frozen events. Approximately equal to the event_count sum of all buckets.
+          - description: Count of events for index, excluding frozen events. Approximately equal to the event_count sum of all buckets. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.
             gauge:
               dataPoints:
                 - asInt: "108411855"
@@ -71,7 +71,7 @@
                   timeUnixNano: "2000000"
             name: splunk.data.indexes.extended.event.count
             unit: '{events}'
-          - description: Size in bytes on disk of the /rawdata/ directories of all buckets in this index, excluding frozen
+          - description: Size in bytes on disk of the /rawdata/ directories of all buckets in this index, excluding frozen. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.
             gauge:
               dataPoints:
                 - asInt: "70825079209"
@@ -83,7 +83,7 @@
                   timeUnixNano: "2000000"
             name: splunk.data.indexes.extended.raw.size
             unit: By
-          - description: Size in bytes on disk of this index
+          - description: Size in bytes on disk of this index. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.
             gauge:
               dataPoints:
                 - asInt: "20818468798"
@@ -95,7 +95,7 @@
                   timeUnixNano: "2000000"
             name: splunk.data.indexes.extended.total.size
             unit: By
-          - description: Gauge tracking average bytes per second throughput of indexer
+          - description: Gauge tracking average bytes per second throughput of indexer. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.
             gauge:
               dataPoints:
                 - asDouble: 25579.690815904476
@@ -107,7 +107,7 @@
                   timeUnixNano: "2000000"
             name: splunk.indexer.throughput
             unit: By/s
-          - description: Gauge tracking current length of queue
+          - description: Gauge tracking current length of queue. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.
             gauge:
               dataPoints:
                 - asInt: "1"
@@ -119,7 +119,7 @@
                   timeUnixNano: "2000000"
             name: splunk.server.introspection.queues.current
             unit: '{queues}'
-          - description: Gauge tracking current bytes waiting in queue
+          - description: Gauge tracking current bytes waiting in queue. **Note:** Must be pointed at a specific indexer `endpoint` and gathers metrics from only that indexer.
             gauge:
               dataPoints:
                 - asInt: "100"
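For context, `expected.yaml` is the golden file that the generated scraper test compares live scraper output against. Below is a hedged sketch of the usual golden-file pattern in this repository; `scrapeMetricsSomehow` is a hypothetical stub standing in for the receiver's real scrape call (it returns empty metrics so the sketch compiles on its own), and the exact imports and compare options used by the generated test may differ.

```go
package splunkenterprisereceiver

import (
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest"
)

// scrapeMetricsSomehow is a stub for illustration only; the real test would
// invoke the receiver's scraper against a mocked Splunk API.
func scrapeMetricsSomehow(t *testing.T) pmetric.Metrics {
	t.Helper()
	return pmetric.NewMetrics()
}

func TestScraperAgainstGolden(t *testing.T) {
	// Load the golden file shown in the diff above.
	expected, err := golden.ReadMetrics(filepath.Join("testdata", "scraper", "expected.yaml"))
	require.NoError(t, err)

	actual := scrapeMetricsSomehow(t)

	// Compare while ignoring fields that legitimately vary between runs.
	require.NoError(t, pmetrictest.CompareMetrics(expected, actual,
		pmetrictest.IgnoreStartTimestamp(),
		pmetrictest.IgnoreTimestamp(),
		pmetrictest.IgnoreMetricDataPointsOrder(),
	))
}
```

Ignoring timestamps and data-point order keeps the golden comparison stable across runs while still pinning down descriptions, units, and values like the `asInt`/`asDouble` points in the file above.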