diff --git a/.chloggen/addfsyncflagconfig.yaml b/.chloggen/do-not-scrape-fs-metrics-from-snap-mount.yaml similarity index 68% rename from .chloggen/addfsyncflagconfig.yaml rename to .chloggen/do-not-scrape-fs-metrics-from-snap-mount.yaml index bbcdbaf09..1d041956f 100644 --- a/.chloggen/addfsyncflagconfig.yaml +++ b/.chloggen/do-not-scrape-fs-metrics-from-snap-mount.yaml @@ -1,8 +1,8 @@ # One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' -change_type: enhancement +change_type: bug_fix # The name of the component, or a single word describing the area of concern, (e.g. agent, clusterReceiver, gateway, operator, chart, other) component: agent # A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Added `fsyncFlag` configuration to allow users to enable fsync on the filestorage. +note: Scrape FS metrics from one host disk mounted to the root to avoid scraping errors since the collector likely doesn't have access to other mounts. # One or more tracking issues related to the change -issues: [1425] +issues: [1569] diff --git a/.chloggen/fixexplicittokenta.yaml b/.chloggen/fips.yaml similarity index 77% rename from .chloggen/fixexplicittokenta.yaml rename to .chloggen/fips.yaml index cc5b74f48..c5b2e5ea2 100644 --- a/.chloggen/fixexplicittokenta.yaml +++ b/.chloggen/fips.yaml @@ -1,11 +1,11 @@ # One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' -change_type: bug_fix +change_type: enhancement # The name of the component, or a single word describing the area of concern, (e.g. agent, clusterReceiver, gateway, operator, chart, other) -component: targetAllocator +component: collector # A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). 
-note: Fix the name of the service account token given when featureGates.explicitMountServiceAccountToken is true +note: Document the possible use of a FIPS-140 compliant image # One or more tracking issues related to the change -issues: [1427] +issues: [1582] # (Optional) One or more lines of additional information to render under the primary note. # These lines will be padded with 2 spaces and then inserted directly into the document. # Use pipe (|) for multiline entries. diff --git a/.chloggen/ingest_cluster_receiver_events_into_index_from_annotation.yaml b/.chloggen/ingest_cluster_receiver_events_into_index_from_annotation.yaml new file mode 100644 index 000000000..4058549de --- /dev/null +++ b/.chloggen/ingest_cluster_receiver_events_into_index_from_annotation.yaml @@ -0,0 +1,12 @@ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: 'enhancement' +# The name of the component, or a single word describing the area of concern, (e.g. agent, clusterReceiver, gateway, operator, chart, other) +component: clusterReceiver +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Configure k8s attributes processor for cluster receiver to ingest events into index defined in namespace annotation +# One or more tracking issues related to the change +issues: [1481] +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. 
+subtext: diff --git a/.chloggen/controlplanemetricshistogram.yaml b/.chloggen/refactor-agent-daemonset-toleration.yaml similarity index 71% rename from .chloggen/controlplanemetricshistogram.yaml rename to .chloggen/refactor-agent-daemonset-toleration.yaml index 572f927d5..3362b470f 100644 --- a/.chloggen/controlplanemetricshistogram.yaml +++ b/.chloggen/refactor-agent-daemonset-toleration.yaml @@ -3,12 +3,10 @@ change_type: enhancement # The name of the component, or a single word describing the area of concern, (e.g. agent, clusterReceiver, gateway, operator, chart, other) component: agent # A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Add a feature gate `useControlPlaneMetricsHistogramData` +note: Make it so the default tolerations used to deploy the agent collector account for k8s distribution # One or more tracking issues related to the change -issues: [1372] +issues: [1562] # (Optional) One or more lines of additional information to render under the primary note. # These lines will be padded with 2 spaces and then inserted directly into the document. # Use pipe (|) for multiline entries. -subtext: | - This feature gate allows to gather control plane metrics and send them as histogram data to Observability Cloud. - This is an experimental feature under heavy development. 
+subtext: OpenShift infra nodes and AKS system nodes will now be monitored by the agent by default diff --git a/.chloggen/operator-instrumentation-update.yaml b/.chloggen/update-dotnet.yaml similarity index 61% rename from .chloggen/operator-instrumentation-update.yaml rename to .chloggen/update-dotnet.yaml index 52756797f..9680911a4 100644 --- a/.chloggen/operator-instrumentation-update.yaml +++ b/.chloggen/update-dotnet.yaml @@ -1,12 +1,12 @@ # One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' -change_type: breaking +change_type: enhancement # The name of the component, or a single word describing the area of concern, (e.g. agent, clusterReceiver, gateway, operator, chart, other) component: operator # A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Operator Helm values previously under `.Values.operator.instrumentation.spec.*` have been moved to `.Values.instrumentation.*` +note: Bump dotnet to v1.8.0 in helm-charts/splunk-otel-collector/values.yaml # One or more tracking issues related to the change -issues: [1436] +issues: [1538] # (Optional) One or more lines of additional information to render under the primary note. # These lines will be padded with 2 spaces and then inserted directly into the document. # Use pipe (|) for multiline entries. -subtext: If you use custom values under `.Values.operator.instrumentation.spec.*` please review the [upgrade guidelines](https://github.com/signalfx/splunk-otel-collector-chart/blob/main/UPGRADING.md#01055-01080) +subtext: diff --git a/.chloggen/update-java.yaml b/.chloggen/update-java.yaml new file mode 100644 index 000000000..878100c50 --- /dev/null +++ b/.chloggen/update-java.yaml @@ -0,0 +1,12 @@ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement +# The name of the component, or a single word describing the area of concern, (e.g. 
agent, clusterReceiver, gateway, operator, chart, other) +component: operator +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Bump java to v2.10.0 in helm-charts/splunk-otel-collector/values.yaml +# One or more tracking issues related to the change +issues: [1551] +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: diff --git a/.chloggen/update-nodejs.yaml b/.chloggen/update-nodejs.yaml new file mode 100644 index 000000000..a31613a57 --- /dev/null +++ b/.chloggen/update-nodejs.yaml @@ -0,0 +1,12 @@ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement +# The name of the component, or a single word describing the area of concern, (e.g. agent, clusterReceiver, gateway, operator, chart, other) +component: operator +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Bump nodejs to v2.15.0 in helm-charts/splunk-otel-collector/values.yaml +# One or more tracking issues related to the change +issues: [1558] +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: diff --git a/.chloggen/update-service-telemetry-endpoint.yaml b/.chloggen/update-service-telemetry-endpoint.yaml new file mode 100644 index 000000000..2d1aa723e --- /dev/null +++ b/.chloggen/update-service-telemetry-endpoint.yaml @@ -0,0 +1,12 @@ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement +# The name of the component, or a single word describing the area of concern, (e.g. 
agent, clusterReceiver, gateway, operator, chart, other) +component: agent, clusterReceiver, gateway +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Update config for scraping internal metrics to use new config interface and loopback address. +# One or more tracking issues related to the change +issues: [1573] +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: "This also drops redundant attributes reported with the internal metrics: `net.host.name` and `server.address`" diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index f77f7aa72..184de342c 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -17,7 +17,7 @@ jobs: steps: - name: "CLA Assistant" if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target' - uses: cla-assistant/github-action@v2.5.1 + uses: cla-assistant/github-action@v2.6.1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} PERSONAL_ACCESS_TOKEN: ${{ secrets.PAT_CLATOOL }} diff --git a/.github/workflows/functional_test.yaml b/.github/workflows/functional_test.yaml index da585c739..729c64c28 100644 --- a/.github/workflows/functional_test.yaml +++ b/.github/workflows/functional_test.yaml @@ -5,7 +5,7 @@ on: jobs: functional-test: - name: Kubernetes ${{ matrix.kubernetes_version }} ${{ matrix.container_runtime }} + name: K8s ${{ matrix.kubernetes_version }} ${{ matrix.container_runtime }}, Splunk ${{ matrix.splunk_version }} runs-on: ubuntu-20.04 strategy: fail-fast: false @@ -24,6 +24,9 @@ jobs: - "docker" - "containerd" - "cri-o" + splunk_version: + - 9.3.0 + - 8.2.9 env: CI_SPLUNK_PORT: 8089 CI_SPLUNK_USERNAME: admin @@ -33,6 +36,7 @@ jobs: 
CI_INDEX_METRICS: ci_metrics CONTAINER_RUNTIME: ${{ matrix.container_runtime }} KUBERNETES_VERSION: ${{ matrix.kubernetes_version }} + SPLUNK_VERSION: ${{ matrix.splunk_version }} MINIKUBE_VERSION: latest steps: @@ -70,6 +74,11 @@ jobs: until kubectl -n default get serviceaccount default -o name; do sleep 1; done + # set splunk version, hec_token, splunk password in k8s-splunk.yaml file + sed -i "s/splunk:9.3.0/splunk:${SPLUNK_VERSION}/g" ci_scripts/k8s-splunk.yml + sed -i "s/value: helloworld/value: ${CI_SPLUNK_PASSWORD}/g" ci_scripts/k8s-splunk.yml + sed -i "s/value: 00000000-0000-0000-0000-0000000000000/value: ${CI_SPLUNK_HEC_TOKEN}/g" ci_scripts/k8s-splunk.yml + cat ci_scripts/k8s-splunk.yml # Install Splunk on minikube kubectl apply -f ci_scripts/k8s-splunk.yml # Wait until splunk is ready diff --git a/.github/workflows/functional_test_v2.yaml b/.github/workflows/functional_test_v2.yaml index e29791b43..31b17d8da 100644 --- a/.github/workflows/functional_test_v2.yaml +++ b/.github/workflows/functional_test_v2.yaml @@ -6,8 +6,12 @@ on: branches: [ main ] workflow_dispatch: inputs: - UPDATE_EXPECTED_RESULTS: - description: 'Set this to true to update expected results and collect updated test output as a Github workflow artifact.' + UPLOAD_UPDATED_EXPECTED_RESULTS: + description: 'Set this to true to upload updated golden file expected results and upload these results as a Github workflow artifact.' + required: false + default: false + UPLOAD_KUBERNETES_DEBUG_INFO: + description: 'Set this to true to collect the debug info of the k8s cluster and upload this info as a Github workflow artifact.' 
required: false default: false @@ -20,6 +24,8 @@ jobs: env: KUBECONFIG: /tmp/kube-config-splunk-otel-collector-chart-functional-testing KUBE_TEST_ENV: kind + UPLOAD_UPDATED_EXPECTED_RESULTS: ${{ github.event.inputs.UPLOAD_UPDATED_EXPECTED_RESULTS || 'false' }} + UPLOAD_KUBERNETES_DEBUG_INFO: ${{ github.event.inputs.UPLOAD_KUBERNETES_DEBUG_INFO || 'false' }} strategy: fail-fast: false matrix: @@ -36,6 +42,7 @@ jobs: test-job: - functional - histogram + - configuration_switching runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -53,7 +60,7 @@ jobs: ~/go/pkg/mod key: go-cache-${{ runner.os }}-${{ hashFiles('**/go.sum') }} - name: Create kind cluster - uses: helm/kind-action@v1.10.0 + uses: helm/kind-action@v1.11.0 with: node_image: kindest/node:${{ matrix.k8s-version }} kubectl_version: ${{ matrix.k8s-version }} @@ -69,14 +76,28 @@ jobs: run: | make cert-manager - name: run functional tests + id: run-functional-tests env: K8S_VERSION: ${{ matrix.k8s-version }} - UPDATE_EXPECTED_RESULTS: ${{ github.event.inputs.UPDATE_EXPECTED_RESULTS || 'false' }} run: | cd functional_tests - TEARDOWN_BEFORE_SETUP=true UPDATE_EXPECTED_RESULTS=${{ env.UPDATE_EXPECTED_RESULTS }} go test -v -tags ${{ matrix.test-job }} - - name: 'Upload test results' - if: failure() && env.UPDATE_EXPECTED_RESULTS == 'true' + TEARDOWN_BEFORE_SETUP=true UPDATE_EXPECTED_RESULTS=${{ env.UPLOAD_UPDATED_EXPECTED_RESULTS }} go test -v -tags ${{ matrix.test-job }} + - name: Collect Kubernetes Cluster debug info on failure + if: always() && (steps.run-functional-tests.outcome == 'failure' || env.UPLOAD_KUBERNETES_DEBUG_INFO == 'true') + id: collect-debug-info + run: | + echo "Functional tests failed. Collecting debug info for current state of the Kubernetes cluster..." 
+ cd tools + ./splunk_kubernetes_debug_info.sh + - name: Upload Kubernetes Cluster debug info + if: always() && (steps.run-functional-tests.outcome == 'failure' || env.UPLOAD_KUBERNETES_DEBUG_INFO == 'true') + uses: actions/upload-artifact@v4 + with: + name: k8s-debug-info-${{ matrix.test-job }}-${{ matrix.k8s-version }} + path: tools/splunk_kubernetes_debug_info_* + retention-days: 5 + - name: Upload test results + if: always() && env.UPLOAD_UPDATED_EXPECTED_RESULTS == 'true' uses: actions/upload-artifact@v4 with: name: functional_tests-${{ matrix.test-job }}-${{ matrix.k8s-version }} @@ -228,14 +249,14 @@ jobs: ~/go/bin ~/go/pkg/mod key: go-cache-${{ runner.os }}-${{ hashFiles('**/go.sum') }} - - uses: 'google-github-actions/auth@v2.1.5' + - uses: 'google-github-actions/auth@v2.1.7' with: project_id: ${{ secrets.GKE_PROJECT }} credentials_json: ${{ secrets.GKE_SA_KEY }} - - uses: google-github-actions/setup-gcloud@v2.1.1 + - uses: google-github-actions/setup-gcloud@v2.1.2 with: project_id: ${{ secrets.GKE_PROJECT }} - - uses: google-github-actions/get-gke-credentials@v2.2.1 + - uses: google-github-actions/get-gke-credentials@v2.3.0 with: cluster_name: ${{ secrets.GKE_AUTOPILOT_CLUSTER }} location: ${{ secrets.GKE_REGION }} @@ -291,14 +312,14 @@ jobs: ~/go/bin ~/go/pkg/mod key: go-cache-${{ runner.os }}-${{ hashFiles('**/go.sum') }} - - uses: 'google-github-actions/auth@v2.1.5' + - uses: 'google-github-actions/auth@v2.1.7' with: project_id: ${{ secrets.GKE_PROJECT }} credentials_json: ${{ secrets.GKE_SA_KEY }} - - uses: google-github-actions/setup-gcloud@v2.1.1 + - uses: google-github-actions/setup-gcloud@v2.1.2 with: project_id: ${{ secrets.GKE_PROJECT }} - - uses: google-github-actions/get-gke-credentials@v2.2.1 + - uses: google-github-actions/get-gke-credentials@v2.3.0 with: cluster_name: ${{ secrets.GKE_AUTOPILOT_CLUSTER }} location: ${{ secrets.GKE_REGION }} @@ -347,7 +368,7 @@ jobs: uses: azure/use-kubelogin@v1.2 with: kubelogin-version: "v0.0.24" - - uses: 
azure/login@v2.1.1 + - uses: azure/login@v2.2.0 with: creds: ${{ secrets.AZURE_CREDENTIALS }} - uses: azure/aks-set-context@v4 @@ -365,3 +386,43 @@ jobs: run: | cd functional_tests TEARDOWN_BEFORE_SETUP=true go test -v -tags functional + + gce-autopilot-test: + name: Test helm install in GCE (kops) - credentials needed + needs: kubernetes-test + if: github.event.pull_request.head.repo.full_name == github.repository + concurrency: + group: gce-access + env: + KUBE_TEST_ENV: gce + SKIP_TESTS: "true" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: ~1.21.8 + cache: false + - name: Cache Go + id: go-cache + timeout-minutes: 5 + uses: actions/cache@v4 + with: + path: | + ~/go/bin + ~/go/pkg/mod + key: go-cache-${{ runner.os }}-${{ hashFiles('**/go.sum') }} + - name: Update dependencies + run: | + make dep-update + - name: Set kubeconfig + run: echo "$GCE_KUBECONFIG" > /tmp/kubeconfig + env: + GCE_KUBECONFIG: ${{ secrets.GCE_KUBECONFIG }} + - name: run functional tests + env: + HOST_ENDPOINT: 0.0.0.0 + KUBECONFIG: /tmp/kubeconfig + run: | + cd functional_tests + TEARDOWN_BEFORE_SETUP=true go test -v -tags functional diff --git a/.github/workflows/helm-test.yaml b/.github/workflows/helm-test.yaml index ca23ef822..7fdc0c1fa 100644 --- a/.github/workflows/helm-test.yaml +++ b/.github/workflows/helm-test.yaml @@ -44,7 +44,7 @@ jobs: make unittest - name: Create kind cluster - uses: helm/kind-action@v1.10.0 + uses: helm/kind-action@v1.11.0 # Only build a kind cluster if there are chart changes to test. 
if: steps.list-changed.outputs.changed == 'true' diff --git a/.github/workflows/migration_tests.yaml b/.github/workflows/migration_tests.yaml index 8dd3c0958..ef9767d90 100644 --- a/.github/workflows/migration_tests.yaml +++ b/.github/workflows/migration_tests.yaml @@ -42,7 +42,7 @@ jobs: ~/go/pkg/mod key: go-cache-${{ runner.os }}-${{ hashFiles('**/go.sum') }} - name: Create kind cluster - uses: helm/kind-action@v1.10.0 + uses: helm/kind-action@v1.11.0 with: node_image: kindest/node:${{ matrix.k8s-version }} kubectl_version: ${{ matrix.k8s-version }} diff --git a/.gitignore b/.gitignore index 893e47be6..843d2761e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ .idea *.iml .DS_Store +*splunk_kubernetes_debug_info_* # Helm **/charts/*.tgz diff --git a/CHANGELOG.md b/CHANGELOG.md index 62ab3a2d2..f162e2766 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,108 @@ +## [0.113.0] - 2024-11-22 + +This Splunk OpenTelemetry Collector for Kubernetes release adopts the [Splunk OpenTelemetry Collector v0.113.0](https://github.com/signalfx/splunk-otel-collector/releases/tag/v0.113.0). + +### 🧰 Bug fixes 🧰 + +- `agent`: Fixes hostmetrics receiver to use the correct mount path of the host's filesystem from inside the container ([#1547](https://github.com/signalfx/splunk-otel-collector-chart/pull/1547)) +- `agent`: Exclude scraping filesystem metrics from mounts that are not accessible from inside the container to avoid scraping errors. ([#1550](https://github.com/signalfx/splunk-otel-collector-chart/pull/1550)) + +## [0.112.1] - 2024-11-20 + +This Splunk OpenTelemetry Collector for Kubernetes release adopts the [Splunk OpenTelemetry Collector v0.112.0](https://github.com/signalfx/splunk-otel-collector/releases/tag/v0.112.0). 
+ +### 🧰 Bug fixes 🧰 + +- `agent`: Fix bug where hostmetrics receiver was failing to scrape the filesystem ([#1533](https://github.com/signalfx/splunk-otel-collector-chart/pull/1533)) +- `operator`: Fix bug where sometimes Instrumentation opentelemetry.io/v1alpha1 can be installed too early ([#1544](https://github.com/signalfx/splunk-otel-collector-chart/pull/1544)) + +## [0.112.0] - 2024-11-07 + +This Splunk OpenTelemetry Collector for Kubernetes release adopts the [Splunk OpenTelemetry Collector v0.112.0](https://github.com/signalfx/splunk-otel-collector/releases/tag/v0.112.0). + +### πŸ›‘ Breaking changes πŸ›‘ + +- `agent, gateway, chart`: update default traces exporter to otlphttp ([#1518](https://github.com/signalfx/splunk-otel-collector-chart/pull/1518)) + If you use the sapm exporter with custom settings, you have two options: + - Migrate your sapm settings to the new otlphttp exporter. + - Retain SAPM settings by moving them to your agent.config or gateway.config overrides to ensure they remain effective. + +### πŸ’‘ Enhancements πŸ’‘ + +- `operator`: Bump operator to 0.71.2 in helm-charts/splunk-otel-collector/Chart.yaml ([#1511](https://github.com/signalfx/splunk-otel-collector-chart/pull/1511)) +- `operator`: Bump java to v2.9.0 in helm-charts/splunk-otel-collector/values.yaml ([#1509](https://github.com/signalfx/splunk-otel-collector-chart/pull/1509)) +- `operator`: Bump nodejs to v2.14.0 in helm-charts/splunk-otel-collector/values.yaml ([#1519](https://github.com/signalfx/splunk-otel-collector-chart/pull/1519)) + +## [0.111.0] - 2024-10-12 + +This Splunk OpenTelemetry Collector for Kubernetes release adopts the [Splunk OpenTelemetry Collector v0.111.0](https://github.com/signalfx/splunk-otel-collector/releases/tag/v0.111.0). 
+ +### 🚩 Deprecations 🚩 + +- `chart`: Added a note anbout the deprecation of the `fluentd` option in the chart ([#1460](https://github.com/signalfx/splunk-otel-collector-chart/pull/1460)) + +### πŸ’‘ Enhancements πŸ’‘ + +- `chart`: Propagated "sourcetype" to work for metrics and traces ([#1376](https://github.com/signalfx/splunk-otel-collector-chart/pull/1376)) +- `agent`: The agent is now deployed with a Kubernetes service for receiving telemetry data by default ([#1485](https://github.com/signalfx/splunk-otel-collector-chart/pull/1485)) +- `operator`: Bump dotnet to v1.7.0 in helm-charts/splunk-otel-collector/values.yaml ([#1474](https://github.com/signalfx/splunk-otel-collector-chart/pull/1474)) +- `operator`: Bump nodejs to v2.13.0 in helm-charts/splunk-otel-collector/values.yaml ([#1470](https://github.com/signalfx/splunk-otel-collector-chart/pull/1470)) + +### 🧰 Bug fixes 🧰 + +- `agent`: Add k8s.node.name attribute to discovered service entities to fix broken link in the UI. ([#1494](https://github.com/signalfx/splunk-otel-collector-chart/pull/1494)) + +## [0.110.0] - 2024-09-27 + +This Splunk OpenTelemetry Collector for Kubernetes release adopts the [Splunk OpenTelemetry Collector v0.110.0](https://github.com/signalfx/splunk-otel-collector/releases/tag/v0.110.0). + +### πŸ’‘ Enhancements πŸ’‘ + +- `template`: Add default metadata key for token to batch processor ([#1467](https://github.com/signalfx/splunk-otel-collector-chart/pull/1467)) + Add default metadata key for token to batch processor. + This will allow the token to be retrieved from the context. When SAPM is deprecated and + OTLP used, this will be the normal mode of operation. 
+- `operator`: Bump operator to 0.56.0 in helm-charts/splunk-otel-collector/Chart.yaml ([#1446](https://github.com/signalfx/splunk-otel-collector-chart/pull/1446)) +- `operator`: Bump java to v2.8.1 in helm-charts/splunk-otel-collector/values.yaml ([#1458](https://github.com/signalfx/splunk-otel-collector-chart/pull/1458)) + +### 🧰 Bug fixes 🧰 + +- `agent`: use root_path to configure the hostmetricsreceiver, instead of environment variables. ([#1462](https://github.com/signalfx/splunk-otel-collector-chart/pull/1462)) + +## [0.109.0] - 2024-09-17 + +This Splunk OpenTelemetry Collector for Kubernetes release adopts the [Splunk OpenTelemetry Collector v0.109.0](https://github.com/signalfx/splunk-otel-collector/releases/tag/v0.109.0). + +### πŸ›‘ Breaking changes πŸ›‘ + +- `operator`: Operator Helm values previously under `.Values.operator.instrumentation.spec.*` have been moved to `.Values.instrumentation.*` ([#1436](https://github.com/signalfx/splunk-otel-collector-chart/pull/1436)) + If you use custom values under `.Values.operator.instrumentation.spec.*` please review the [upgrade guidelines](https://github.com/signalfx/splunk-otel-collector-chart/blob/main/UPGRADING.md#01055-01080) + +### πŸ’‘ Enhancements πŸ’‘ + +- `agent`: Added `fsyncFlag` configuration to allow users to enable fsync on the filestorage. ([#1425](https://github.com/signalfx/splunk-otel-collector-chart/pull/1425)) +- `agent`: Add a feature gate `useControlPlaneMetricsHistogramData` ([#1372](https://github.com/signalfx/splunk-otel-collector-chart/pull/1372)) + This feature gate allows to gather control plane metrics and send them as histogram data to Observability Cloud. + This is an experimental feature under heavy development. + +- `agent`: Add base configuration to support the new continuous discovery mechanism. ([#1455](https://github.com/signalfx/splunk-otel-collector-chart/pull/1455)) + The new continuous discovery mechanism is disabled by default. 
To enable it, set the following values in your configuration: + ```yaml + agent: + discovery: + enabled: true + featureGates: splunk.continuousDiscovery + ``` + +- `operator`: Bump nodejs to v2.12.0 in helm-charts/splunk-otel-collector/values.yaml ([#1434](https://github.com/signalfx/splunk-otel-collector-chart/pull/1434)) + +### 🧰 Bug fixes 🧰 + +- `targetAllocator`: Fix the name of the service account token given when featureGates.explicitMountServiceAccountToken is true ([#1427](https://github.com/signalfx/splunk-otel-collector-chart/pull/1427)) + ## [0.105.5] - 2024-08-28 This Splunk OpenTelemetry Collector for Kubernetes release adopts the [Splunk OpenTelemetry Collector v0.105.0](https://github.com/signalfx/splunk-otel-collector/releases/tag/v0.105.0). diff --git a/Makefile b/Makefile index bdacabab2..595a3b1f6 100644 --- a/Makefile +++ b/Makefile @@ -43,6 +43,12 @@ install-tools: ## Install tools (macOS/Linux) .PHONY: dep-update dep-update: ## Fetch Helm chart dependency repositories, build the Helm chart with the dependencies specified in the Chart.yaml @{ \ + DIR=helm-charts/splunk-otel-collector ;\ + LOCK_FILE=$$DIR/Chart.lock ;\ + if [ -f "$$LOCK_FILE" ] ; then \ + echo "Removing existing Chart.lock file..."; \ + rm -f "$$LOCK_FILE" || exit 1; \ + fi ;\ if ! (helm repo list | grep -q open-telemetry) ; then \ helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts || exit 1; \ fi ;\ @@ -51,10 +57,13 @@ dep-update: ## Fetch Helm chart dependency repositories, build the Helm chart wi fi ;\ helm repo update open-telemetry jetstack || exit 1; \ DEP_OK=true ;\ - DIR=helm-charts/splunk-otel-collector ;\ if ! helm dependencies list $$DIR | grep open-telemetry | grep -q ok ; then DEP_OK=false ; fi ;\ if ! 
helm dependencies list $$DIR | grep jetstack | grep -q ok ; then DEP_OK=false ; fi ;\ if [ "$$DEP_OK" = "false" ] ; then helm dependencies update $$DIR || exit 1; fi ;\ + if [ -f "$$LOCK_FILE" ] ; then \ + echo "Removing Chart.lock file post-update..."; \ + rm -f "$$LOCK_FILE" || exit 1; \ + fi ;\ } # Example Usage: diff --git a/README.md b/README.md index 62a439fc1..884c5d10f 100644 --- a/README.md +++ b/README.md @@ -84,6 +84,8 @@ This distribution currently supports: exporter](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/splunkhecexporter). - Interested in sending logs to Splunk Cloud via OTLP for improved backpressure handling and performance? [Sign up for the preview](https://voc.splunk.com/preview/otlp) today! +**Fluentd logs engine is now deprecated and will reach End Of Support in October 2025.** Migrating to the native OTEL logs engine before this date is strongly recommended. + ## Supported Kubernetes distributions The Helm chart works with default configurations of the main Kubernetes distributions. Use actively supported versions: diff --git a/UPGRADING.md b/UPGRADING.md index 774f3e5f4..41f83040d 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -44,7 +44,7 @@ to update your custom dashboards, detectors, or alerts using Java application te ### Breaking Changes Overview - Runtime metrics will now be enabled by default, this can increase the number of metrics collected. - The default protocol changed from gRPC to http/protobuf. For custom Java exporter endpoint -configurations, verify that you’re sending data to http/protobuf endpoints like this [example](https://github.com/signalfx/splunk-otel-collector-chart/blob/splunk-otel-collector-0.107.0/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml#L59). 
+configurations, verify that you’re sending data to http/protobuf endpoints like this [example](https://github.com/signalfx/splunk-otel-collector-chart/blob/splunk-otel-collector-0.105.4/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml#L59). - Span Attribute Name Changes: | Old Attribute (1.x) | New Attribute (2.x) | diff --git a/ci_scripts/k8s-splunk.yml b/ci_scripts/k8s-splunk.yml index 3cd697ea5..0287fcf1f 100644 --- a/ci_scripts/k8s-splunk.yml +++ b/ci_scripts/k8s-splunk.yml @@ -60,7 +60,7 @@ spec: runAsGroup: 0 containers: - name: splunk - image: docker.io/splunk/splunk:8.2.0 + image: docker.io/splunk/splunk:9.3.0 ports: - name: web-interface containerPort: 8000 diff --git a/docs/advanced-configuration.md b/docs/advanced-configuration.md index c738ea52d..c16cf15f4 100644 --- a/docs/advanced-configuration.md +++ b/docs/advanced-configuration.md @@ -559,6 +559,7 @@ Manage Splunk OTel Collector Logging with these supported annotations. * Use `splunk.com/index` annotation on pod and/or namespace to tell which Splunk platform indexes to ingest to. Pod annotation will take precedence over namespace annotation when both are annotated. For example, the following command will make logs from `kube-system` namespace to be sent to `k8s_events` index: `kubectl annotate namespace kube-system splunk.com/index=k8s_events` + **Please Note:** Cluster receiver supports only namespace index annotations, pod index annotations are not supported. * Use `splunk.com/metricsIndex` annotation on pod and/or namespace to tell which Splunk platform metric indexes to ingest to. Pod annotation will take precedence over namespace annotation when both are annotated. * Filter logs using pod and/or namespace annotation * If `logsCollection.containers.useSplunkIncludeAnnotation` is `false` (default: false), set `splunk.com/exclude` annotation to `true` on pod and/or namespace to exclude its logs from ingested. 
diff --git a/docs/auto-instrumentation-install.md b/docs/auto-instrumentation-install.md index 8af06f5ef..7a07132e4 100644 --- a/docs/auto-instrumentation-install.md +++ b/docs/auto-instrumentation-install.md @@ -401,6 +401,7 @@ provides best effort support with issues related to native OpenTelemetry instrum | nodejs | OpenTelemetry | Available | Yes | Mostly | [Link](https://github.com/open-telemetry/opentelemetry-nodejs-instrumentation) | ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodes | | python | OpenTelemetry | Available | Needs Validation | | [Link](https://github.com/open-telemetry/opentelemetry-java-instrumentation) | ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java | | apache-httpd | OpenTelemetry | Available | Needs Validation | | [Link](https://github.com/open-telemetry/opentelemetry-apache-httpd-instrumentation) | ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-apache-httpd | +| nginx | OpenTelemetry | Available | Needs Validation | | [Link](https://github.com/open-telemetry/opentelemetry-apache-httpd-instrumentation) | ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-apache-httpd | ### Documentation Resources diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index f4b677c6d..22b39ae18 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -2,12 +2,11 @@ Find the official troubleshooting documentation for the Splunk Distribution of the OpenTelemetry Collector for Kubernetes at: -* [Troubleshoot the Collector for Kubernetes](https://docs.splunk.com/observability/en/gdi/opentelemetry/collector-kubernetes/troubleshoot-k8s.html) -* [Troubleshoot the Collector for Kubernetes containers](https://docs.splunk.com/observability/en/gdi/opentelemetry/collector-kubernetes/troubleshoot-k8s-container.html) +* [Troubleshoot the Collector for 
Kubernetes](https://docs.splunk.com/observability/en/gdi/opentelemetry/collector-kubernetes/k8s-troubleshooting/troubleshoot-k8s-landing.html) +* [Troubleshoot the Collector for Kubernetes containers](https://docs.splunk.com/observability/en/gdi/opentelemetry/troubleshooting.html) -For general troubleshooting of the Splunk Distribution of the OpenTelemetry Collector see [Splunk OpenTelemetry Collector troubleshooting documentation](https://github.com/signalfx/splunk-otel-collector/blob/main/docs/troubleshooting.md](https://docs.splunk.com/observability/en/gdi/opentelemetry/troubleshooting.html). +For general troubleshooting of the Splunk Distribution of the OpenTelemetry Collector see [Splunk OpenTelemetry Collector troubleshooting documentation](https://docs.splunk.com/observability/en/gdi/opentelemetry/troubleshooting.html). -You might review the [OpenTelemetry Collector troubleshooting documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/troubleshooting.md) as well. 
## Gathering Support Information diff --git a/examples/add-filter-processor/rendered_manifests/clusterRole.yaml b/examples/add-filter-processor/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/add-filter-processor/rendered_manifests/clusterRole.yaml +++ b/examples/add-filter-processor/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/add-filter-processor/rendered_manifests/clusterRoleBinding.yaml b/examples/add-filter-processor/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/add-filter-processor/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/add-filter-processor/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/add-filter-processor/rendered_manifests/configmap-agent.yaml b/examples/add-filter-processor/rendered_manifests/configmap-agent.yaml index 0dc0a7e28..6b5d30f9d 100644 --- a/examples/add-filter-processor/rendered_manifests/configmap-agent.yaml +++ 
b/examples/add-filter-processor/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -43,7 +49,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/exclude_all_telemetry_data_from_namespace: logs: exclude: @@ -296,10 +304,15 @@ data: endpoint: 0.0.0.0:8006 hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -321,6 +334,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -347,7 +361,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -427,6 +441,16 @@ data: - filelog - fluentforward - otlp + logs/entities: + exporters: + - 
otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -454,7 +478,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -469,4 +493,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/add-filter-processor/rendered_manifests/configmap-cluster-receiver.yaml b/examples/add-filter-processor/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- a/examples/add-filter-processor/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/add-filter-processor/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/add-filter-processor/rendered_manifests/daemonset.yaml b/examples/add-filter-processor/rendered_manifests/daemonset.yaml index 338ed96ea..5a0c87055 100644 --- a/examples/add-filter-processor/rendered_manifests/daemonset.yaml +++ b/examples/add-filter-processor/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default 
labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 6fe8488db7c37cac8ddc2ab6eb259a11e79be1e0b30d2deccd2a8e01610d7d15 + checksum/config: e62f6ee4ef1ba1d8771d4056077ed10afe5d3bc6b4052980b2a7cc9f15a2bcdd kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,14 +41,21 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists initContainers: - name: migrate-checkpoint - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent command: ["/migratecheckpoint"] securityContext: @@ -118,7 +125,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 @@ -156,24 +163,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: 
/hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/add-filter-processor/rendered_manifests/deployment-cluster-receiver.yaml b/examples/add-filter-processor/rendered_manifests/deployment-cluster-receiver.yaml index 409e578cd..256cfb004 100644 --- a/examples/add-filter-processor/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/add-filter-processor/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git 
a/examples/add-filter-processor/rendered_manifests/secret-splunk.yaml b/examples/add-filter-processor/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/add-filter-processor/rendered_manifests/secret-splunk.yaml +++ b/examples/add-filter-processor/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/add-filter-processor/rendered_manifests/service-agent.yaml b/examples/add-filter-processor/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..5f693ffc0 --- /dev/null +++ b/examples/add-filter-processor/rendered_manifests/service-agent.yaml @@ -0,0 +1,59 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: fluentforward + port: 8006 + targetPort: fluentforward + protocol: TCP + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + 
port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/add-filter-processor/rendered_manifests/serviceAccount.yaml b/examples/add-filter-processor/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/add-filter-processor/rendered_manifests/serviceAccount.yaml +++ b/examples/add-filter-processor/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/add-kafkametrics-receiver/rendered_manifests/clusterRole.yaml b/examples/add-kafkametrics-receiver/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/add-kafkametrics-receiver/rendered_manifests/clusterRole.yaml +++ b/examples/add-kafkametrics-receiver/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: 
splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/add-kafkametrics-receiver/rendered_manifests/clusterRoleBinding.yaml b/examples/add-kafkametrics-receiver/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/add-kafkametrics-receiver/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/add-kafkametrics-receiver/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/add-kafkametrics-receiver/rendered_manifests/configmap-agent.yaml b/examples/add-kafkametrics-receiver/rendered_manifests/configmap-agent.yaml index cdb22caea..4e7af223a 100644 --- a/examples/add-kafkametrics-receiver/rendered_manifests/configmap-agent.yaml +++ b/examples/add-kafkametrics-receiver/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: 
https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -43,7 +49,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -245,10 +253,15 @@ data: endpoint: 0.0.0.0:8006 hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -277,6 +290,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -303,7 +317,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -381,6 +395,16 @@ data: - filelog - fluentforward - otlp + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -404,7 +428,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -419,4 +443,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/add-kafkametrics-receiver/rendered_manifests/configmap-cluster-receiver.yaml b/examples/add-kafkametrics-receiver/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- 
a/examples/add-kafkametrics-receiver/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/add-kafkametrics-receiver/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/add-kafkametrics-receiver/rendered_manifests/daemonset.yaml b/examples/add-kafkametrics-receiver/rendered_manifests/daemonset.yaml index 37119bbf4..481f8df2f 100644 --- a/examples/add-kafkametrics-receiver/rendered_manifests/daemonset.yaml +++ b/examples/add-kafkametrics-receiver/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 
bfb54db69288c8e9f42aaabc9d594027e4b720724c74e88757e47b9c88f0c4f6 + checksum/config: e8ae8de67a024a79ab31a0ac261fdb0c85fd63b509681242d93917bfc29b8a76 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,14 +41,21 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists initContainers: - name: migrate-checkpoint - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent command: ["/migratecheckpoint"] securityContext: @@ -118,7 +125,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 @@ -156,24 +163,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/add-kafkametrics-receiver/rendered_manifests/deployment-cluster-receiver.yaml b/examples/add-kafkametrics-receiver/rendered_manifests/deployment-cluster-receiver.yaml index 
409e578cd..256cfb004 100644 --- a/examples/add-kafkametrics-receiver/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/add-kafkametrics-receiver/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/add-kafkametrics-receiver/rendered_manifests/secret-splunk.yaml b/examples/add-kafkametrics-receiver/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/add-kafkametrics-receiver/rendered_manifests/secret-splunk.yaml +++ b/examples/add-kafkametrics-receiver/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - 
app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/add-kafkametrics-receiver/rendered_manifests/service-agent.yaml b/examples/add-kafkametrics-receiver/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..5f693ffc0 --- /dev/null +++ b/examples/add-kafkametrics-receiver/rendered_manifests/service-agent.yaml @@ -0,0 +1,59 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: fluentforward + port: 8006 + targetPort: fluentforward + protocol: TCP + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/add-kafkametrics-receiver/rendered_manifests/serviceAccount.yaml 
b/examples/add-kafkametrics-receiver/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/add-kafkametrics-receiver/rendered_manifests/serviceAccount.yaml +++ b/examples/add-kafkametrics-receiver/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/add-receiver-creator/rendered_manifests/clusterRole.yaml b/examples/add-receiver-creator/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/add-receiver-creator/rendered_manifests/clusterRole.yaml +++ b/examples/add-receiver-creator/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/add-receiver-creator/rendered_manifests/clusterRoleBinding.yaml b/examples/add-receiver-creator/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/add-receiver-creator/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/add-receiver-creator/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector 
labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/add-receiver-creator/rendered_manifests/configmap-agent.yaml b/examples/add-receiver-creator/rendered_manifests/configmap-agent.yaml index 3053ec2f6..a4d42d6f7 100644 --- a/examples/add-receiver-creator/rendered_manifests/configmap-agent.yaml +++ b/examples/add-receiver-creator/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - 
X-SF-Token filter/logs: logs: exclude: @@ -127,10 +135,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -152,6 +165,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -178,7 +192,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: postgresql: @@ -255,6 +269,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -282,7 +306,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -297,4 +321,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/add-receiver-creator/rendered_manifests/configmap-cluster-receiver.yaml b/examples/add-receiver-creator/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- a/examples/add-receiver-creator/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/add-receiver-creator/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s 
static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/add-receiver-creator/rendered_manifests/daemonset.yaml b/examples/add-receiver-creator/rendered_manifests/daemonset.yaml index a024d1d4a..3b4085d2e 100644 --- a/examples/add-receiver-creator/rendered_manifests/daemonset.yaml +++ b/examples/add-receiver-creator/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 7ceb9c933d99ac7fc06865b198dfbb1639b38db9cc1f22c0eb5cb56b9296b308 + checksum/config: 46b8ba55ad9b9c54254be9211b0396df3cd0d3a71dbc20fb3ba8893b6318ad88 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: 
quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/add-receiver-creator/rendered_manifests/deployment-cluster-receiver.yaml b/examples/add-receiver-creator/rendered_manifests/deployment-cluster-receiver.yaml index 409e578cd..256cfb004 100644 --- a/examples/add-receiver-creator/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/add-receiver-creator/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + 
checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/add-receiver-creator/rendered_manifests/secret-splunk.yaml b/examples/add-receiver-creator/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/add-receiver-creator/rendered_manifests/secret-splunk.yaml +++ b/examples/add-receiver-creator/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/add-receiver-creator/rendered_manifests/service-agent.yaml b/examples/add-receiver-creator/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ b/examples/add-receiver-creator/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: 
splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/add-receiver-creator/rendered_manifests/serviceAccount.yaml b/examples/add-receiver-creator/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/add-receiver-creator/rendered_manifests/serviceAccount.yaml +++ b/examples/add-receiver-creator/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/add-sampler/rendered_manifests/clusterRole.yaml b/examples/add-sampler/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/add-sampler/rendered_manifests/clusterRole.yaml +++ b/examples/add-sampler/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: 
splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/add-sampler/rendered_manifests/clusterRoleBinding.yaml b/examples/add-sampler/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/add-sampler/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/add-sampler/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/add-sampler/rendered_manifests/configmap-agent.yaml b/examples/add-sampler/rendered_manifests/configmap-agent.yaml index 148cab539..94249f154 100644 --- a/examples/add-sampler/rendered_manifests/configmap-agent.yaml +++ b/examples/add-sampler/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: 
default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -130,10 +138,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -155,6 +168,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -181,7 +195,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -243,6 +257,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -270,7 +294,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -286,4 +310,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/add-sampler/rendered_manifests/configmap-cluster-receiver.yaml b/examples/add-sampler/rendered_manifests/configmap-cluster-receiver.yaml 
index 5267ca292..69e8e29f3 100644 --- a/examples/add-sampler/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/add-sampler/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/add-sampler/rendered_manifests/daemonset.yaml b/examples/add-sampler/rendered_manifests/daemonset.yaml index 21e78ba27..5e21ce77b 100644 --- a/examples/add-sampler/rendered_manifests/daemonset.yaml +++ b/examples/add-sampler/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 01e45f5e1ba9251c0476e935360970e28b07321eb860fea41723ec8a3afc5064 + checksum/config: 
4a6276e318600d9ea3e76c91b6670f201c626ba375a03af94eac9d8efcf71cdb kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/add-sampler/rendered_manifests/deployment-cluster-receiver.yaml b/examples/add-sampler/rendered_manifests/deployment-cluster-receiver.yaml index 409e578cd..256cfb004 100644 --- a/examples/add-sampler/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/add-sampler/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: 
splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/add-sampler/rendered_manifests/secret-splunk.yaml b/examples/add-sampler/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/add-sampler/rendered_manifests/secret-splunk.yaml +++ b/examples/add-sampler/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/add-sampler/rendered_manifests/service-agent.yaml b/examples/add-sampler/rendered_manifests/service-agent.yaml new file mode 100644 index 
000000000..95c885c81 --- /dev/null +++ b/examples/add-sampler/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/add-sampler/rendered_manifests/serviceAccount.yaml b/examples/add-sampler/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/add-sampler/rendered_manifests/serviceAccount.yaml +++ b/examples/add-sampler/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + 
app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/autodetect-istio/rendered_manifests/clusterRole.yaml b/examples/autodetect-istio/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/autodetect-istio/rendered_manifests/clusterRole.yaml +++ b/examples/autodetect-istio/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/autodetect-istio/rendered_manifests/clusterRoleBinding.yaml b/examples/autodetect-istio/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/autodetect-istio/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/autodetect-istio/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/autodetect-istio/rendered_manifests/configmap-agent.yaml b/examples/autodetect-istio/rendered_manifests/configmap-agent.yaml index 
3ed09aaea..f66f46e4e 100644 --- a/examples/autodetect-istio/rendered_manifests/configmap-agent.yaml +++ b/examples/autodetect-istio/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -61,7 +67,9 @@ data: match_type: regexp metric_names: - istio_.* - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -280,10 +288,15 @@ data: endpoint: 0.0.0.0:8006 hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -305,6 +318,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -331,7 +345,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: prometheus_simple: @@ -418,6 +432,16 @@ data: - filelog - fluentforward 
- otlp + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -446,7 +470,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -461,4 +485,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/autodetect-istio/rendered_manifests/configmap-cluster-receiver.yaml b/examples/autodetect-istio/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- a/examples/autodetect-istio/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/autodetect-istio/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/autodetect-istio/rendered_manifests/daemonset.yaml b/examples/autodetect-istio/rendered_manifests/daemonset.yaml index 84b57b3be..646b0deb1 100644 --- a/examples/autodetect-istio/rendered_manifests/daemonset.yaml +++ b/examples/autodetect-istio/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: 
default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 95d77cfd6fd31e9c356984ef94500390b2963353462ec2df06051a78a32d9918 + checksum/config: c22db802040791e7ed72c236060ae18ce39720b23c968d35bd6d8aac1fb07295 kubectl.kubernetes.io/default-container: otel-collector sidecar.istio.io/inject: "false" spec: @@ -42,14 +42,21 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists initContainers: - name: migrate-checkpoint - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent command: ["/migratecheckpoint"] securityContext: @@ -119,7 +126,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 @@ -157,24 +164,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: 
HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/autodetect-istio/rendered_manifests/deployment-cluster-receiver.yaml b/examples/autodetect-istio/rendered_manifests/deployment-cluster-receiver.yaml index 2494efc01..d7d8f4d85 100644 --- a/examples/autodetect-istio/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/autodetect-istio/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 sidecar.istio.io/inject: "false" spec: serviceAccountName: default-splunk-otel-collector @@ -42,7 +42,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git 
a/examples/autodetect-istio/rendered_manifests/secret-splunk.yaml b/examples/autodetect-istio/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/autodetect-istio/rendered_manifests/secret-splunk.yaml +++ b/examples/autodetect-istio/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/autodetect-istio/rendered_manifests/service-agent.yaml b/examples/autodetect-istio/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..5f693ffc0 --- /dev/null +++ b/examples/autodetect-istio/rendered_manifests/service-agent.yaml @@ -0,0 +1,59 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: fluentforward + port: 8006 + targetPort: fluentforward + protocol: TCP + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + 
protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/autodetect-istio/rendered_manifests/serviceAccount.yaml b/examples/autodetect-istio/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/autodetect-istio/rendered_manifests/serviceAccount.yaml +++ b/examples/autodetect-istio/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/collector-agent-only/rendered_manifests/clusterRole.yaml b/examples/collector-agent-only/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/collector-agent-only/rendered_manifests/clusterRole.yaml +++ b/examples/collector-agent-only/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: 
splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/collector-agent-only/rendered_manifests/clusterRoleBinding.yaml b/examples/collector-agent-only/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/collector-agent-only/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/collector-agent-only/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/collector-agent-only/rendered_manifests/configmap-agent.yaml b/examples/collector-agent-only/rendered_manifests/configmap-agent.yaml index 741292002..5ae71d8b5 100644 --- a/examples/collector-agent-only/rendered_manifests/configmap-agent.yaml +++ b/examples/collector-agent-only/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: 
https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -127,10 +135,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -152,6 +165,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -178,7 +192,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -240,6 +254,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -267,7 +291,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -282,4 +306,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/collector-agent-only/rendered_manifests/daemonset.yaml b/examples/collector-agent-only/rendered_manifests/daemonset.yaml index db7b3d22b..d54237eba 100644 --- a/examples/collector-agent-only/rendered_manifests/daemonset.yaml +++ b/examples/collector-agent-only/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - 
helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 60aa06bfcb080c769f77047ef5c7d14232089748eaa54188d6d37144b3d00e75 + checksum/config: 1d6330c88154a12a7ef72f47ff1fe38ad8ab08eb47f5116a2b16c13f735ade30 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # 
https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/collector-agent-only/rendered_manifests/secret-splunk.yaml b/examples/collector-agent-only/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/collector-agent-only/rendered_manifests/secret-splunk.yaml +++ b/examples/collector-agent-only/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/collector-agent-only/rendered_manifests/service-agent.yaml b/examples/collector-agent-only/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ b/examples/collector-agent-only/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + 
targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/collector-agent-only/rendered_manifests/serviceAccount.yaml b/examples/collector-agent-only/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/collector-agent-only/rendered_manifests/serviceAccount.yaml +++ b/examples/collector-agent-only/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/collector-all-modes/rendered_manifests/clusterRole.yaml b/examples/collector-all-modes/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/collector-all-modes/rendered_manifests/clusterRole.yaml +++ b/examples/collector-all-modes/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + 
app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/collector-all-modes/rendered_manifests/clusterRoleBinding.yaml b/examples/collector-all-modes/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/collector-all-modes/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/collector-all-modes/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/collector-all-modes/rendered_manifests/configmap-agent.yaml b/examples/collector-all-modes/rendered_manifests/configmap-agent.yaml index 35a33720e..a564047d4 100644 --- a/examples/collector-all-modes/rendered_manifests/configmap-agent.yaml +++ b/examples/collector-all-modes/rendered_manifests/configmap-agent.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -22,6 +22,10 @@ data: endpoint: default-splunk-otel-collector:4317 tls: insecure: true + otlphttp/entities: + headers: + X-SF-Token: 
${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: http://default-splunk-otel-collector:6060 @@ -36,7 +40,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token k8sattributes: extract: annotations: @@ -113,10 +119,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -138,6 +149,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -164,7 +176,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -226,6 +238,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - otlp @@ -268,4 +290,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/collector-all-modes/rendered_manifests/configmap-cluster-receiver.yaml b/examples/collector-all-modes/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- a/examples/collector-all-modes/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/collector-all-modes/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + 
app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/collector-all-modes/rendered_manifests/configmap-gateway.yaml b/examples/collector-all-modes/rendered_manifests/configmap-gateway.yaml index 631a4d100..82551808e 100644 --- a/examples/collector-all-modes/rendered_manifests/configmap-gateway.yaml +++ b/examples/collector-all-modes/rendered_manifests/configmap-gateway.yaml @@ -7,22 +7,24 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp sending_queue: num_consumers: 32 + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -37,7 +39,9 @@ data: endpoint: https://api.CHANGEME.signalfx.com zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: 
logs: exclude: @@ -157,7 +161,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 signalfx: access_token_passthrough: true endpoint: 0.0.0.0:9943 @@ -200,7 +204,7 @@ data: - prometheus/collector traces: exporters: - - sapm + - otlphttp processors: - memory_limiter - k8sattributes @@ -212,4 +216,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/collector-all-modes/rendered_manifests/daemonset.yaml b/examples/collector-all-modes/rendered_manifests/daemonset.yaml index ef91ae507..10f0384b7 100644 --- a/examples/collector-all-modes/rendered_manifests/daemonset.yaml +++ b/examples/collector-all-modes/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 6550d2eec588231adb9c7fd5cf870cf0b3df06cd6047104a59cbdda87ded9abf + checksum/config: aa6f2aba1c3d388e69538aef580c1ab673bd1bbc263a1ecccb99a7cd76c2e25d kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: 
node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/collector-all-modes/rendered_manifests/deployment-cluster-receiver.yaml b/examples/collector-all-modes/rendered_manifests/deployment-cluster-receiver.yaml index 409e578cd..256cfb004 100644 --- a/examples/collector-all-modes/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/collector-all-modes/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ 
-31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/collector-all-modes/rendered_manifests/deployment-gateway.yaml b/examples/collector-all-modes/rendered_manifests/deployment-gateway.yaml index 340315fb9..2462e8718 100644 --- a/examples/collector-all-modes/rendered_manifests/deployment-gateway.yaml +++ b/examples/collector-all-modes/rendered_manifests/deployment-gateway.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-collector @@ -31,7 +31,7 @@ spec: component: otel-collector release: default annotations: - checksum/config: 423f15849c8a40434fe5f46f3998ce1bb49dfc33bc797125966abdc2850c2937 + checksum/config: 9a18e1abec41b07245232e29f2462870e5bda91ddb7df7775e434c1d393f5c7f spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: 
IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/collector-all-modes/rendered_manifests/secret-splunk.yaml b/examples/collector-all-modes/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/collector-all-modes/rendered_manifests/secret-splunk.yaml +++ b/examples/collector-all-modes/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/collector-all-modes/rendered_manifests/service-agent.yaml b/examples/collector-all-modes/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ b/examples/collector-all-modes/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + 
protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/collector-all-modes/rendered_manifests/service.yaml b/examples/collector-all-modes/rendered_manifests/service.yaml index 0b1743e48..dc9a8aa72 100644 --- a/examples/collector-all-modes/rendered_manifests/service.yaml +++ b/examples/collector-all-modes/rendered_manifests/service.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-collector diff --git a/examples/collector-all-modes/rendered_manifests/serviceAccount.yaml b/examples/collector-all-modes/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/collector-all-modes/rendered_manifests/serviceAccount.yaml +++ b/examples/collector-all-modes/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: 
splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/collector-cluster-receiver-only/rendered_manifests/clusterRole.yaml b/examples/collector-cluster-receiver-only/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/collector-cluster-receiver-only/rendered_manifests/clusterRole.yaml +++ b/examples/collector-cluster-receiver-only/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/collector-cluster-receiver-only/rendered_manifests/clusterRoleBinding.yaml b/examples/collector-cluster-receiver-only/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/collector-cluster-receiver-only/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/collector-cluster-receiver-only/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/collector-cluster-receiver-only/rendered_manifests/configmap-cluster-receiver.yaml 
b/examples/collector-cluster-receiver-only/rendered_manifests/configmap-cluster-receiver.yaml index fd5b00ff7..f6b9c8b7b 100644 --- a/examples/collector-cluster-receiver-only/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/collector-cluster-receiver-only/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -36,6 +36,23 @@ data: processors: batch: send_batch_max_size: 32768 + k8sattributes/clusterReceiver: + extract: + metadata: + - k8s.namespace.name + - k8s.node.name + - k8s.pod.name + - k8s.pod.uid + - container.id + - container.image.name + - container.image.tag + pod_association: + - sources: + - from: resource_attribute + name: k8s.namespace.name + - sources: + - from: resource_attribute + name: k8s.node.name memory_limiter: check_interval: 2s limit_mib: ${SPLUNK_MEMORY_LIMIT_MIB} @@ -116,7 +133,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -130,6 +147,7 @@ data: - resourcedetection - resource - transform/add_sourcetype + - k8sattributes/clusterReceiver receivers: - k8sobjects metrics: @@ -155,4 +173,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/collector-cluster-receiver-only/rendered_manifests/deployment-cluster-receiver.yaml b/examples/collector-cluster-receiver-only/rendered_manifests/deployment-cluster-receiver.yaml index 
ebf02e93d..b2b1a4562 100644 --- a/examples/collector-cluster-receiver-only/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/collector-cluster-receiver-only/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: 1f1c7346c8d9e4b47e9aacf0e6b20477dcc0db2f7803b5d2db978eef5165f619 + checksum/config: 16eb789f621f6f691683ae1d9872e6221975434b8cafa68b14aa399c3d07f46e spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/collector-cluster-receiver-only/rendered_manifests/secret-splunk.yaml b/examples/collector-cluster-receiver-only/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/collector-cluster-receiver-only/rendered_manifests/secret-splunk.yaml +++ b/examples/collector-cluster-receiver-only/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm 
app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/collector-cluster-receiver-only/rendered_manifests/serviceAccount.yaml b/examples/collector-cluster-receiver-only/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/collector-cluster-receiver-only/rendered_manifests/serviceAccount.yaml +++ b/examples/collector-cluster-receiver-only/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/collector-gateway-only/rendered_manifests/clusterRole.yaml b/examples/collector-gateway-only/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/collector-gateway-only/rendered_manifests/clusterRole.yaml +++ b/examples/collector-gateway-only/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git 
a/examples/collector-gateway-only/rendered_manifests/clusterRoleBinding.yaml b/examples/collector-gateway-only/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/collector-gateway-only/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/collector-gateway-only/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/collector-gateway-only/rendered_manifests/configmap-gateway.yaml b/examples/collector-gateway-only/rendered_manifests/configmap-gateway.yaml index 631a4d100..82551808e 100644 --- a/examples/collector-gateway-only/rendered_manifests/configmap-gateway.yaml +++ b/examples/collector-gateway-only/rendered_manifests/configmap-gateway.yaml @@ -7,22 +7,24 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp 
sending_queue: num_consumers: 32 + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -37,7 +39,9 @@ data: endpoint: https://api.CHANGEME.signalfx.com zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -157,7 +161,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 signalfx: access_token_passthrough: true endpoint: 0.0.0.0:9943 @@ -200,7 +204,7 @@ data: - prometheus/collector traces: exporters: - - sapm + - otlphttp processors: - memory_limiter - k8sattributes @@ -212,4 +216,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/collector-gateway-only/rendered_manifests/deployment-gateway.yaml b/examples/collector-gateway-only/rendered_manifests/deployment-gateway.yaml index 36bf64fa2..218ddf2e4 100644 --- a/examples/collector-gateway-only/rendered_manifests/deployment-gateway.yaml +++ b/examples/collector-gateway-only/rendered_manifests/deployment-gateway.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-collector @@ -31,7 +31,7 @@ spec: component: otel-collector release: default annotations: - checksum/config: 423f15849c8a40434fe5f46f3998ce1bb49dfc33bc797125966abdc2850c2937 + checksum/config: 
9a18e1abec41b07245232e29f2462870e5bda91ddb7df7775e434c1d393f5c7f spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/collector-gateway-only/rendered_manifests/secret-splunk.yaml b/examples/collector-gateway-only/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/collector-gateway-only/rendered_manifests/secret-splunk.yaml +++ b/examples/collector-gateway-only/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/collector-gateway-only/rendered_manifests/service.yaml b/examples/collector-gateway-only/rendered_manifests/service.yaml index 0b1743e48..dc9a8aa72 100644 --- a/examples/collector-gateway-only/rendered_manifests/service.yaml +++ b/examples/collector-gateway-only/rendered_manifests/service.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector - chart: splunk-otel-collector-0.105.5 + chart: 
splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-collector diff --git a/examples/collector-gateway-only/rendered_manifests/serviceAccount.yaml b/examples/collector-gateway-only/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/collector-gateway-only/rendered_manifests/serviceAccount.yaml +++ b/examples/collector-gateway-only/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/controlplane-histogram-metrics/rendered_manifests/clusterRole.yaml b/examples/controlplane-histogram-metrics/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/controlplane-histogram-metrics/rendered_manifests/clusterRole.yaml +++ b/examples/controlplane-histogram-metrics/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/controlplane-histogram-metrics/rendered_manifests/clusterRoleBinding.yaml b/examples/controlplane-histogram-metrics/rendered_manifests/clusterRoleBinding.yaml index 
d9d5cdb93..e19e3f9a0 100644 --- a/examples/controlplane-histogram-metrics/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/controlplane-histogram-metrics/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/controlplane-histogram-metrics/rendered_manifests/configmap-agent.yaml b/examples/controlplane-histogram-metrics/rendered_manifests/configmap-agent.yaml index d42301cc2..30b28594b 100644 --- a/examples/controlplane-histogram-metrics/rendered_manifests/configmap-agent.yaml +++ b/examples/controlplane-histogram-metrics/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: 
${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -40,7 +46,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -132,10 +140,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -157,6 +170,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -183,7 +197,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: prometheus/coredns: @@ -270,6 +284,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -307,7 +331,7 @@ data: - receiver_creator traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -322,4 +346,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/controlplane-histogram-metrics/rendered_manifests/configmap-cluster-receiver.yaml b/examples/controlplane-histogram-metrics/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- a/examples/controlplane-histogram-metrics/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/controlplane-histogram-metrics/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: 
splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/controlplane-histogram-metrics/rendered_manifests/daemonset.yaml b/examples/controlplane-histogram-metrics/rendered_manifests/daemonset.yaml index b7bb0e70e..6b2c75d5b 100644 --- a/examples/controlplane-histogram-metrics/rendered_manifests/daemonset.yaml +++ b/examples/controlplane-histogram-metrics/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 3d3238906a6339f6ddaa693ce1e5755ed3fac22e10651706d18e061a300ce60e + checksum/config: 71ea8369e9e1803325fc72bbcfcf4a28339570a0c421c74677a752722f8a5cb6 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master 
+ operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/controlplane-histogram-metrics/rendered_manifests/deployment-cluster-receiver.yaml b/examples/controlplane-histogram-metrics/rendered_manifests/deployment-cluster-receiver.yaml index 409e578cd..256cfb004 100644 --- a/examples/controlplane-histogram-metrics/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/controlplane-histogram-metrics/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + 
app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/controlplane-histogram-metrics/rendered_manifests/secret-splunk.yaml b/examples/controlplane-histogram-metrics/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/controlplane-histogram-metrics/rendered_manifests/secret-splunk.yaml +++ b/examples/controlplane-histogram-metrics/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/controlplane-histogram-metrics/rendered_manifests/service-agent.yaml b/examples/controlplane-histogram-metrics/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ 
b/examples/controlplane-histogram-metrics/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/controlplane-histogram-metrics/rendered_manifests/serviceAccount.yaml b/examples/controlplane-histogram-metrics/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/controlplane-histogram-metrics/rendered_manifests/serviceAccount.yaml +++ b/examples/controlplane-histogram-metrics/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - 
app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/crio-logging/rendered_manifests/clusterRole.yaml b/examples/crio-logging/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/crio-logging/rendered_manifests/clusterRole.yaml +++ b/examples/crio-logging/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/crio-logging/rendered_manifests/clusterRoleBinding.yaml b/examples/crio-logging/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/crio-logging/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/crio-logging/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/crio-logging/rendered_manifests/configmap-agent.yaml b/examples/crio-logging/rendered_manifests/configmap-agent.yaml index 
741292002..5ae71d8b5 100644 --- a/examples/crio-logging/rendered_manifests/configmap-agent.yaml +++ b/examples/crio-logging/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -127,10 +135,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -152,6 +165,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -178,7 +192,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -240,6 +254,16 @@ data: - k8s_observer - zpages pipelines: + 
logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -267,7 +291,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -282,4 +306,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/crio-logging/rendered_manifests/configmap-cluster-receiver.yaml b/examples/crio-logging/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- a/examples/crio-logging/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/crio-logging/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/crio-logging/rendered_manifests/daemonset.yaml b/examples/crio-logging/rendered_manifests/daemonset.yaml index db7b3d22b..d54237eba 100644 --- a/examples/crio-logging/rendered_manifests/daemonset.yaml +++ b/examples/crio-logging/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: 
splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 60aa06bfcb080c769f77047ef5c7d14232089748eaa54188d6d37144b3d00e75 + checksum/config: 1d6330c88154a12a7ef72f47ff1fe38ad8ab08eb47f5116a2b16c13f735ade30 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil 
mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/crio-logging/rendered_manifests/deployment-cluster-receiver.yaml b/examples/crio-logging/rendered_manifests/deployment-cluster-receiver.yaml index 409e578cd..256cfb004 100644 --- a/examples/crio-logging/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/crio-logging/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/crio-logging/rendered_manifests/secret-splunk.yaml b/examples/crio-logging/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/crio-logging/rendered_manifests/secret-splunk.yaml +++ b/examples/crio-logging/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: 
namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/crio-logging/rendered_manifests/service-agent.yaml b/examples/crio-logging/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ b/examples/crio-logging/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: 
Local diff --git a/examples/crio-logging/rendered_manifests/serviceAccount.yaml b/examples/crio-logging/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/crio-logging/rendered_manifests/serviceAccount.yaml +++ b/examples/crio-logging/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/default/rendered_manifests/clusterRole.yaml b/examples/default/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/default/rendered_manifests/clusterRole.yaml +++ b/examples/default/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/default/rendered_manifests/clusterRoleBinding.yaml b/examples/default/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/default/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/default/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - 
helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/default/rendered_manifests/configmap-agent.yaml b/examples/default/rendered_manifests/configmap-agent.yaml index 741292002..5ae71d8b5 100644 --- a/examples/default/rendered_manifests/configmap-agent.yaml +++ b/examples/default/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -127,10 +135,15 @@ data: receivers: hostmetrics: collection_interval: 
10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -152,6 +165,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -178,7 +192,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -240,6 +254,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -267,7 +291,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -282,4 +306,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/default/rendered_manifests/configmap-cluster-receiver.yaml b/examples/default/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- a/examples/default/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/default/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - 
prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/default/rendered_manifests/daemonset.yaml b/examples/default/rendered_manifests/daemonset.yaml index db7b3d22b..d54237eba 100644 --- a/examples/default/rendered_manifests/daemonset.yaml +++ b/examples/default/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 60aa06bfcb080c769f77047ef5c7d14232089748eaa54188d6d37144b3d00e75 + checksum/config: 1d6330c88154a12a7ef72f47ff1fe38ad8ab08eb47f5116a2b16c13f735ade30 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: 
secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/default/rendered_manifests/deployment-cluster-receiver.yaml b/examples/default/rendered_manifests/deployment-cluster-receiver.yaml index 409e578cd..256cfb004 100644 --- a/examples/default/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/default/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: 
quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/default/rendered_manifests/secret-splunk.yaml b/examples/default/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/default/rendered_manifests/secret-splunk.yaml +++ b/examples/default/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/default/rendered_manifests/service-agent.yaml b/examples/default/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ b/examples/default/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: 
otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/default/rendered_manifests/serviceAccount.yaml b/examples/default/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/default/rendered_manifests/serviceAccount.yaml +++ b/examples/default/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/disable-persistence-queue-traces/rendered_manifests/clusterRole.yaml b/examples/disable-persistence-queue-traces/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/disable-persistence-queue-traces/rendered_manifests/clusterRole.yaml +++ b/examples/disable-persistence-queue-traces/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - 
chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/disable-persistence-queue-traces/rendered_manifests/clusterRoleBinding.yaml b/examples/disable-persistence-queue-traces/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/disable-persistence-queue-traces/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/disable-persistence-queue-traces/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/disable-persistence-queue-traces/rendered_manifests/configmap-agent.yaml b/examples/disable-persistence-queue-traces/rendered_manifests/configmap-agent.yaml index a03fe6ac5..3cd681076 100644 --- a/examples/disable-persistence-queue-traces/rendered_manifests/configmap-agent.yaml +++ b/examples/disable-persistence-queue-traces/rendered_manifests/configmap-agent.yaml @@ -7,17 +7,21 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: + otlphttp/entities: + headers: + X-SF-Token: 
${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest..signalfx.com/v3/event splunk_hec/platform_logs: disable_compression: true endpoint: CHANGEME @@ -38,7 +42,7 @@ data: storage: file_storage/persistent_queue source: kubernetes splunk_app_name: splunk-otel-collector - splunk_app_version: 0.105.5 + splunk_app_version: 0.113.0 timeout: 10s tls: insecure_skip_verify: false @@ -62,7 +66,7 @@ data: storage: file_storage/persistent_queue source: kubernetes splunk_app_name: splunk-otel-collector - splunk_app_version: 0.105.5 + splunk_app_version: 0.113.0 timeout: 10s tls: insecure_skip_verify: false @@ -86,7 +90,7 @@ data: storage: null source: kubernetes splunk_app_name: splunk-otel-collector - splunk_app_version: 0.105.5 + splunk_app_version: 0.113.0 timeout: 10s tls: insecure_skip_verify: false @@ -104,7 +108,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -336,10 +342,15 @@ data: endpoint: 0.0.0.0:8006 hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -361,6 +372,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -387,7 +399,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -463,6 +475,16 @@ data: - filelog - fluentforward - otlp + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - splunk_hec/platform_metrics @@ -505,4 +527,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git 
a/examples/disable-persistence-queue-traces/rendered_manifests/configmap-cluster-receiver.yaml b/examples/disable-persistence-queue-traces/rendered_manifests/configmap-cluster-receiver.yaml index c6a663b85..08ca70f27 100644 --- a/examples/disable-persistence-queue-traces/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/disable-persistence-queue-traces/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -36,7 +36,7 @@ data: queue_size: 1000 source: kubernetes splunk_app_name: splunk-otel-collector - splunk_app_version: 0.105.5 + splunk_app_version: 0.113.0 timeout: 10s tls: insecure_skip_verify: false @@ -136,7 +136,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -166,4 +166,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/disable-persistence-queue-traces/rendered_manifests/daemonset.yaml b/examples/disable-persistence-queue-traces/rendered_manifests/daemonset.yaml index 99c291bcd..b3f2fbb34 100644 --- a/examples/disable-persistence-queue-traces/rendered_manifests/daemonset.yaml +++ b/examples/disable-persistence-queue-traces/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: 
splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 475f8c97681a13e8e30986fdc3baa71eee05a2f3e066c29e0777080a9a8a87c0 + checksum/config: 8f13ff66652ba33fe2a9aee4c1a2e43517bfe12d57093be5d675ef515f537d78 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,14 +41,21 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists initContainers: - name: migrate-checkpoint - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent command: ["/migratecheckpoint"] securityContext: @@ -118,7 +125,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 @@ -156,24 +163,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_platform_hec_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until 
https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/disable-persistence-queue-traces/rendered_manifests/deployment-cluster-receiver.yaml b/examples/disable-persistence-queue-traces/rendered_manifests/deployment-cluster-receiver.yaml index fe946f9eb..a39fcbcba 100644 --- a/examples/disable-persistence-queue-traces/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/disable-persistence-queue-traces/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: c238a58b0c8c81f81b1497dcff8d795a79788d35ee239ffafb7ad4f58dc6b09e + checksum/config: 31057c8fa63ede3a8e48bce75a40865619cdd244a7cc16ade3471013072fb603 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/disable-persistence-queue-traces/rendered_manifests/secret-splunk.yaml 
b/examples/disable-persistence-queue-traces/rendered_manifests/secret-splunk.yaml index 9351af226..145b7e0b7 100644 --- a/examples/disable-persistence-queue-traces/rendered_manifests/secret-splunk.yaml +++ b/examples/disable-persistence-queue-traces/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/disable-persistence-queue-traces/rendered_manifests/service-agent.yaml b/examples/disable-persistence-queue-traces/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..5f693ffc0 --- /dev/null +++ b/examples/disable-persistence-queue-traces/rendered_manifests/service-agent.yaml @@ -0,0 +1,59 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: fluentforward + port: 8006 + targetPort: fluentforward + protocol: TCP + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + 
port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/disable-persistence-queue-traces/rendered_manifests/serviceAccount.yaml b/examples/disable-persistence-queue-traces/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/disable-persistence-queue-traces/rendered_manifests/serviceAccount.yaml +++ b/examples/disable-persistence-queue-traces/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/discovery-mode/rendered_manifests/clusterRole.yaml b/examples/discovery-mode/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/discovery-mode/rendered_manifests/clusterRole.yaml +++ b/examples/discovery-mode/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: 
splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/discovery-mode/rendered_manifests/clusterRoleBinding.yaml b/examples/discovery-mode/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/discovery-mode/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/discovery-mode/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/discovery-mode/rendered_manifests/configmap-agent.yaml b/examples/discovery-mode/rendered_manifests/configmap-agent.yaml index ace095d83..118fa48d3 100644 --- a/examples/discovery-mode/rendered_manifests/configmap-agent.yaml +++ b/examples/discovery-mode/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + 
metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -127,10 +135,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -152,6 +165,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -178,7 +192,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -240,6 +254,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -267,7 +291,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -282,7 +306,12 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 discovery.properties: | splunk.discovery: extensions: diff --git a/examples/discovery-mode/rendered_manifests/daemonset.yaml b/examples/discovery-mode/rendered_manifests/daemonset.yaml index 0fe0309e8..436041b41 100644 --- a/examples/discovery-mode/rendered_manifests/daemonset.yaml +++ b/examples/discovery-mode/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: 
app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 9cc9033772495e6a471def3989cbc3ca7f75918b86487f2ee6053cd3bacf9992 + checksum/config: 231a2f6df986952c3f8bee10614bcf75037622f942563f3efdcbfc5a8e132f0a kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -80,7 +87,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -116,24 +123,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall 
back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo - name: REDIS_USERNAME valueFrom: secretKeyRef: diff --git a/examples/discovery-mode/rendered_manifests/secret-splunk.yaml b/examples/discovery-mode/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/discovery-mode/rendered_manifests/secret-splunk.yaml +++ b/examples/discovery-mode/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/discovery-mode/rendered_manifests/service-agent.yaml b/examples/discovery-mode/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ b/examples/discovery-mode/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: 
jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/discovery-mode/rendered_manifests/serviceAccount.yaml b/examples/discovery-mode/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/discovery-mode/rendered_manifests/serviceAccount.yaml +++ b/examples/discovery-mode/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/distribution-aks/rendered_manifests/clusterRole.yaml b/examples/distribution-aks/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/distribution-aks/rendered_manifests/clusterRole.yaml +++ b/examples/distribution-aks/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + 
app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/distribution-aks/rendered_manifests/clusterRoleBinding.yaml b/examples/distribution-aks/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/distribution-aks/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/distribution-aks/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/distribution-aks/rendered_manifests/configmap-agent.yaml b/examples/distribution-aks/rendered_manifests/configmap-agent.yaml index bcf075c84..7e7d51b3e 100644 --- a/examples/distribution-aks/rendered_manifests/configmap-agent.yaml +++ b/examples/distribution-aks/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: 
+ X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -129,10 +137,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -154,6 +167,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -180,7 +194,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: null watch_observers: @@ -198,6 +212,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -225,7 +249,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -240,4 +264,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/distribution-aks/rendered_manifests/configmap-cluster-receiver.yaml b/examples/distribution-aks/rendered_manifests/configmap-cluster-receiver.yaml index 3664a863a..0a8b3e54d 100644 --- a/examples/distribution-aks/rendered_manifests/configmap-cluster-receiver.yaml +++ 
b/examples/distribution-aks/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -93,7 +93,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -121,4 +121,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/distribution-aks/rendered_manifests/daemonset.yaml b/examples/distribution-aks/rendered_manifests/daemonset.yaml index 1974c9cb0..a4e496a10 100644 --- a/examples/distribution-aks/rendered_manifests/daemonset.yaml +++ b/examples/distribution-aks/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 4c6092608da5bb02ccf6dc4c81701525879cb2557bf880eb833f8bb6487f604c + checksum/config: b77d2f4051fab7484cd75e4c622c11aabd72af22dbf6f63412ef41d5e312a87f 
kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/distribution-aks/rendered_manifests/deployment-cluster-receiver.yaml b/examples/distribution-aks/rendered_manifests/deployment-cluster-receiver.yaml index 229414b01..fc277c460 100644 --- a/examples/distribution-aks/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/distribution-aks/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: 
splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: 0ce76d63106907316e1acfdec5394192058bd81e8fdb7adc3ad6b85f8c136c22 + checksum/config: a26a387d06f8d9ff63215cdd2e7762321b8c1e63734b2af49f80874f5bf641f9 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/distribution-aks/rendered_manifests/secret-splunk.yaml b/examples/distribution-aks/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/distribution-aks/rendered_manifests/secret-splunk.yaml +++ b/examples/distribution-aks/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/distribution-aks/rendered_manifests/service-agent.yaml b/examples/distribution-aks/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- 
/dev/null +++ b/examples/distribution-aks/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/distribution-aks/rendered_manifests/serviceAccount.yaml b/examples/distribution-aks/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/distribution-aks/rendered_manifests/serviceAccount.yaml +++ b/examples/distribution-aks/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + 
app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/distribution-eks-fargate/rendered_manifests/clusterRole.yaml b/examples/distribution-eks-fargate/rendered_manifests/clusterRole.yaml index c6efa6678..629190335 100644 --- a/examples/distribution-eks-fargate/rendered_manifests/clusterRole.yaml +++ b/examples/distribution-eks-fargate/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/distribution-eks-fargate/rendered_manifests/clusterRoleBinding.yaml b/examples/distribution-eks-fargate/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/distribution-eks-fargate/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/distribution-eks-fargate/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git 
a/examples/distribution-eks-fargate/rendered_manifests/configmap-cluster-receiver-node-discoverer-script.yaml b/examples/distribution-eks-fargate/rendered_manifests/configmap-cluster-receiver-node-discoverer-script.yaml index 72f6bf83e..fba104d0e 100644 --- a/examples/distribution-eks-fargate/rendered_manifests/configmap-cluster-receiver-node-discoverer-script.yaml +++ b/examples/distribution-eks-fargate/rendered_manifests/configmap-cluster-receiver-node-discoverer-script.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: diff --git a/examples/distribution-eks-fargate/rendered_manifests/configmap-cluster-receiver.yaml b/examples/distribution-eks-fargate/rendered_manifests/configmap-cluster-receiver.yaml index 7a866e94a..d1b77565f 100644 --- a/examples/distribution-eks-fargate/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/distribution-eks-fargate/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -96,7 +96,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: 
kubeletstats: @@ -150,4 +150,9 @@ data: - receiver_creator telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/distribution-eks-fargate/rendered_manifests/configmap-gateway.yaml b/examples/distribution-eks-fargate/rendered_manifests/configmap-gateway.yaml index 6cdd3d339..710017fe8 100644 --- a/examples/distribution-eks-fargate/rendered_manifests/configmap-gateway.yaml +++ b/examples/distribution-eks-fargate/rendered_manifests/configmap-gateway.yaml @@ -7,22 +7,24 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp sending_queue: num_consumers: 32 + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -37,7 +39,9 @@ data: endpoint: https://api.CHANGEME.signalfx.com zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -159,7 +163,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 signalfx: access_token_passthrough: true endpoint: 0.0.0.0:9943 @@ -202,7 +206,7 @@ data: - prometheus/collector traces: exporters: - - sapm + - otlphttp processors: - 
memory_limiter - k8sattributes @@ -214,4 +218,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/distribution-eks-fargate/rendered_manifests/deployment-cluster-receiver.yaml b/examples/distribution-eks-fargate/rendered_manifests/deployment-cluster-receiver.yaml index 7d240049d..16aac9b30 100644 --- a/examples/distribution-eks-fargate/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/distribution-eks-fargate/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -33,7 +33,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: 963c3da7cae0d79993297dbae95478b454e69995b3d94e2eb18ce013f0f621df + checksum/config: 93de5553d98ca8c1308763682892e9d8f85066cd76192936be78510a08d5d4f2 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -74,7 +74,7 @@ spec: command: - /otelcol - --config=/splunk-messages/config.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/distribution-eks-fargate/rendered_manifests/deployment-gateway.yaml b/examples/distribution-eks-fargate/rendered_manifests/deployment-gateway.yaml index 162de7a09..2f4a506a5 100644 --- 
a/examples/distribution-eks-fargate/rendered_manifests/deployment-gateway.yaml +++ b/examples/distribution-eks-fargate/rendered_manifests/deployment-gateway.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-collector @@ -31,7 +31,7 @@ spec: component: otel-collector release: default annotations: - checksum/config: 23dcb44539700d8387ab3ec1270e929b2ef889aa77edfce9737da382407f7e29 + checksum/config: e3a492daf843c7412ff52604bb5ad1197601fe57b94f27527f030b9671000ecd spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/distribution-eks-fargate/rendered_manifests/secret-splunk.yaml b/examples/distribution-eks-fargate/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/distribution-eks-fargate/rendered_manifests/secret-splunk.yaml +++ b/examples/distribution-eks-fargate/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: 
splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/distribution-eks-fargate/rendered_manifests/service.yaml b/examples/distribution-eks-fargate/rendered_manifests/service.yaml index 0b1743e48..dc9a8aa72 100644 --- a/examples/distribution-eks-fargate/rendered_manifests/service.yaml +++ b/examples/distribution-eks-fargate/rendered_manifests/service.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-collector diff --git a/examples/distribution-eks-fargate/rendered_manifests/serviceAccount.yaml b/examples/distribution-eks-fargate/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/distribution-eks-fargate/rendered_manifests/serviceAccount.yaml +++ b/examples/distribution-eks-fargate/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/distribution-eks/rendered_manifests/clusterRole.yaml b/examples/distribution-eks/rendered_manifests/clusterRole.yaml index 
4efc89686..cd1b8539b 100644 --- a/examples/distribution-eks/rendered_manifests/clusterRole.yaml +++ b/examples/distribution-eks/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/distribution-eks/rendered_manifests/clusterRoleBinding.yaml b/examples/distribution-eks/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/distribution-eks/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/distribution-eks/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/distribution-eks/rendered_manifests/configmap-agent.yaml b/examples/distribution-eks/rendered_manifests/configmap-agent.yaml index 0c272bc75..63ce89fe3 100644 --- a/examples/distribution-eks/rendered_manifests/configmap-agent.yaml +++ b/examples/distribution-eks/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + 
helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -129,10 +137,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -154,6 +167,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -180,7 +194,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: null watch_observers: @@ -198,6 +212,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -225,7 +249,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -240,4 
+264,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/distribution-eks/rendered_manifests/configmap-cluster-receiver.yaml b/examples/distribution-eks/rendered_manifests/configmap-cluster-receiver.yaml index c749ad7d7..1a44eaadc 100644 --- a/examples/distribution-eks/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/distribution-eks/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -111,7 +111,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -140,4 +140,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/distribution-eks/rendered_manifests/daemonset.yaml b/examples/distribution-eks/rendered_manifests/daemonset.yaml index 8dd4894e0..d25a6dc15 100644 --- a/examples/distribution-eks/rendered_manifests/daemonset.yaml +++ b/examples/distribution-eks/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: 
splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: bb0f4b0af1d655da2b0e5f2b19db55ba2c7a6793d13e481735a63cfa6dc2e8a5 + checksum/config: 2c67fa0a01433844aaf5c723440d30f55c6c90dd05554e96e3a186279a3d603e kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/distribution-eks/rendered_manifests/deployment-cluster-receiver.yaml 
b/examples/distribution-eks/rendered_manifests/deployment-cluster-receiver.yaml index 71573ca2c..8719f68e1 100644 --- a/examples/distribution-eks/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/distribution-eks/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: 08e8a4de2ead873d0d428c0f5de775489c56c04d0f764399f599eab7be7b174d + checksum/config: f9a639dc73306fd666cd53308a3f5b491b20b70f2ff489b230d2f2a47731282a spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/distribution-eks/rendered_manifests/secret-splunk.yaml b/examples/distribution-eks/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/distribution-eks/rendered_manifests/secret-splunk.yaml +++ b/examples/distribution-eks/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm 
app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/distribution-eks/rendered_manifests/service-agent.yaml b/examples/distribution-eks/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ b/examples/distribution-eks/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/distribution-eks/rendered_manifests/serviceAccount.yaml b/examples/distribution-eks/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- 
a/examples/distribution-eks/rendered_manifests/serviceAccount.yaml +++ b/examples/distribution-eks/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/distribution-gke-autopilot/rendered_manifests/clusterRole.yaml b/examples/distribution-gke-autopilot/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/distribution-gke-autopilot/rendered_manifests/clusterRole.yaml +++ b/examples/distribution-gke-autopilot/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/distribution-gke-autopilot/rendered_manifests/clusterRoleBinding.yaml b/examples/distribution-gke-autopilot/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/distribution-gke-autopilot/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/distribution-gke-autopilot/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: 
splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/distribution-gke-autopilot/rendered_manifests/configmap-agent.yaml b/examples/distribution-gke-autopilot/rendered_manifests/configmap-agent.yaml index edd78667a..5a6bbd559 100644 --- a/examples/distribution-gke-autopilot/rendered_manifests/configmap-agent.yaml +++ b/examples/distribution-gke-autopilot/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -128,10 
+136,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -153,6 +166,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -179,7 +193,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: null watch_observers: @@ -197,6 +211,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -224,7 +248,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -239,4 +263,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/distribution-gke-autopilot/rendered_manifests/configmap-cluster-receiver.yaml b/examples/distribution-gke-autopilot/rendered_manifests/configmap-cluster-receiver.yaml index e70adcfbd..d9ee2a899 100644 --- a/examples/distribution-gke-autopilot/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/distribution-gke-autopilot/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -92,7 +92,7 @@ data: scrape_interval: 10s static_configs: - 
targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -120,4 +120,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/distribution-gke-autopilot/rendered_manifests/daemonset.yaml b/examples/distribution-gke-autopilot/rendered_manifests/daemonset.yaml index 00b40dfd7..1c47a441d 100644 --- a/examples/distribution-gke-autopilot/rendered_manifests/daemonset.yaml +++ b/examples/distribution-gke-autopilot/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: bb624529222d55d820caffcef1d2db84c2c38d64875bb8ed85a6bd36b0f28dbf + checksum/config: e0fa06b0593a64c95a4cfcaf7516fab16808ac2985ac30f02d0549fff6cf6efc kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: 
quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/distribution-gke-autopilot/rendered_manifests/deployment-cluster-receiver.yaml b/examples/distribution-gke-autopilot/rendered_manifests/deployment-cluster-receiver.yaml index 3de5b7590..5c04b932a 100644 --- a/examples/distribution-gke-autopilot/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/distribution-gke-autopilot/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: 
2f27b7b568172618ff83b086911265c16138cbda060d005ecf75bcd8f766a26b + checksum/config: c79002621deb1b6a8c3d2fa7eeedb291558c59ad7c05d0e4ad9dd7dfd5e1050a spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/distribution-gke-autopilot/rendered_manifests/secret-splunk.yaml b/examples/distribution-gke-autopilot/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/distribution-gke-autopilot/rendered_manifests/secret-splunk.yaml +++ b/examples/distribution-gke-autopilot/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/distribution-gke-autopilot/rendered_manifests/service-agent.yaml b/examples/distribution-gke-autopilot/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ b/examples/distribution-gke-autopilot/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + 
app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/distribution-gke-autopilot/rendered_manifests/serviceAccount.yaml b/examples/distribution-gke-autopilot/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/distribution-gke-autopilot/rendered_manifests/serviceAccount.yaml +++ b/examples/distribution-gke-autopilot/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/distribution-gke/rendered_manifests/clusterRole.yaml b/examples/distribution-gke/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/distribution-gke/rendered_manifests/clusterRole.yaml +++ 
b/examples/distribution-gke/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/distribution-gke/rendered_manifests/clusterRoleBinding.yaml b/examples/distribution-gke/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/distribution-gke/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/distribution-gke/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/distribution-gke/rendered_manifests/configmap-agent.yaml b/examples/distribution-gke/rendered_manifests/configmap-agent.yaml index 84c342c74..553533cdf 100644 --- a/examples/distribution-gke/rendered_manifests/configmap-agent.yaml +++ b/examples/distribution-gke/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: 
default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -128,10 +136,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -153,6 +166,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -179,7 +193,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: null watch_observers: @@ -197,6 +211,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -224,7 +248,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -239,4 +263,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + 
prometheus: + host: localhost + port: 8889 diff --git a/examples/distribution-gke/rendered_manifests/configmap-cluster-receiver.yaml b/examples/distribution-gke/rendered_manifests/configmap-cluster-receiver.yaml index e70adcfbd..d9ee2a899 100644 --- a/examples/distribution-gke/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/distribution-gke/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -92,7 +92,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -120,4 +120,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/distribution-gke/rendered_manifests/daemonset.yaml b/examples/distribution-gke/rendered_manifests/daemonset.yaml index 5a89053d0..af75da528 100644 --- a/examples/distribution-gke/rendered_manifests/daemonset.yaml +++ b/examples/distribution-gke/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: 
splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 030f8ee67a03f7f0ed7b0ea7cd338b40a8c064063a847e0f9a5fb8787c4c7a46 + checksum/config: 747dcc2cf82c242f5c1e93afbe873cef0230ab630026acbef7afb4a170561fbf kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/distribution-gke/rendered_manifests/deployment-cluster-receiver.yaml b/examples/distribution-gke/rendered_manifests/deployment-cluster-receiver.yaml index 3de5b7590..5c04b932a 100644 --- 
a/examples/distribution-gke/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/distribution-gke/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: 2f27b7b568172618ff83b086911265c16138cbda060d005ecf75bcd8f766a26b + checksum/config: c79002621deb1b6a8c3d2fa7eeedb291558c59ad7c05d0e4ad9dd7dfd5e1050a spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/distribution-gke/rendered_manifests/secret-splunk.yaml b/examples/distribution-gke/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/distribution-gke/rendered_manifests/secret-splunk.yaml +++ b/examples/distribution-gke/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: 
splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/distribution-gke/rendered_manifests/service-agent.yaml b/examples/distribution-gke/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ b/examples/distribution-gke/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/distribution-gke/rendered_manifests/serviceAccount.yaml b/examples/distribution-gke/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/distribution-gke/rendered_manifests/serviceAccount.yaml +++ 
b/examples/distribution-gke/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/distribution-openshift/rendered_manifests/clusterRole.yaml b/examples/distribution-openshift/rendered_manifests/clusterRole.yaml index 2d3453f98..389bf8f0f 100644 --- a/examples/distribution-openshift/rendered_manifests/clusterRole.yaml +++ b/examples/distribution-openshift/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/distribution-openshift/rendered_manifests/clusterRoleBinding.yaml b/examples/distribution-openshift/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/distribution-openshift/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/distribution-openshift/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm 
app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/distribution-openshift/rendered_manifests/configmap-agent.yaml b/examples/distribution-openshift/rendered_manifests/configmap-agent.yaml index db39ac3d2..57a068593 100644 --- a/examples/distribution-openshift/rendered_manifests/configmap-agent.yaml +++ b/examples/distribution-openshift/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -127,10 +135,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - 
filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -152,6 +165,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -178,7 +192,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -248,6 +262,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -275,7 +299,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -290,4 +314,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/distribution-openshift/rendered_manifests/configmap-cluster-receiver.yaml b/examples/distribution-openshift/rendered_manifests/configmap-cluster-receiver.yaml index ab44d4ae9..39e0951be 100644 --- a/examples/distribution-openshift/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/distribution-openshift/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -92,7 +92,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -120,4 +120,9 @@ data: - 
prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/distribution-openshift/rendered_manifests/daemonset.yaml b/examples/distribution-openshift/rendered_manifests/daemonset.yaml index 51a7f9b6b..6bdee94e4 100644 --- a/examples/distribution-openshift/rendered_manifests/daemonset.yaml +++ b/examples/distribution-openshift/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: f97ad99df70c23e0ab394ba77fd457697f83537e4c9f5539a05ef3df5c4d0982 + checksum/config: 5604b19093ef3a5f7d732fe88084dac041d662b8db54be49883e68edf05b61a5 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: 
SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/distribution-openshift/rendered_manifests/deployment-cluster-receiver.yaml b/examples/distribution-openshift/rendered_manifests/deployment-cluster-receiver.yaml index e270c30f8..f278021c9 100644 --- a/examples/distribution-openshift/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/distribution-openshift/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: 8bb704ed2121c7d81a8ddf4ac31f86851e7afec8ddf313a21dd9e70050b539bf + checksum/config: f3c1444fc4a52e438505dda39fbaa9efceb923cbc38be5ac2c3b0eb90d2aa38d spec: serviceAccountName: default-splunk-otel-collector 
nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/distribution-openshift/rendered_manifests/secret-splunk.yaml b/examples/distribution-openshift/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/distribution-openshift/rendered_manifests/secret-splunk.yaml +++ b/examples/distribution-openshift/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/distribution-openshift/rendered_manifests/securityContextConstraints.yaml b/examples/distribution-openshift/rendered_manifests/securityContextConstraints.yaml index 82258c067..677c26ae9 100644 --- a/examples/distribution-openshift/rendered_manifests/securityContextConstraints.yaml +++ b/examples/distribution-openshift/rendered_manifests/securityContextConstraints.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm users: 
diff --git a/examples/distribution-openshift/rendered_manifests/service-agent.yaml b/examples/distribution-openshift/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ b/examples/distribution-openshift/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/distribution-openshift/rendered_manifests/serviceAccount.yaml b/examples/distribution-openshift/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/distribution-openshift/rendered_manifests/serviceAccount.yaml +++ b/examples/distribution-openshift/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: 
splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/clusterRole.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/clusterRole.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/clusterRoleBinding.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm 
app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/configmap-agent.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/configmap-agent.yaml index dc37e9b0f..72c2e450e 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/configmap-agent.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.us0.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.us0.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.us0.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.us0.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.us0.signalfx.com @@ -43,7 +49,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -250,10 +258,15 @@ data: endpoint: 0.0.0.0:8006 hostmetrics: collection_interval: 10s + 
root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -275,6 +288,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -301,7 +315,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -380,6 +394,16 @@ data: - filelog - fluentforward - otlp + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -407,7 +431,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -423,4 +447,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/configmap-cluster-receiver.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/configmap-cluster-receiver.yaml index 59c0c7875..203cfb1f8 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - 
${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/daemonset.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/daemonset.yaml index df3157f46..11a62e36c 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/daemonset.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 99b79fe28a085939d1da3489ad78866fbf5cb8264482a183c77c796c1107d973 + checksum/config: fffa19a4586067f00cfbcf33346aeb74293292766d08dbe53c8beb8f447cb4fc kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,14 +41,21 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists initContainers: - name: migrate-checkpoint - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + 
image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent command: ["/migratecheckpoint"] securityContext: @@ -118,7 +125,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 @@ -156,24 +163,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/deployment-cluster-receiver.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/deployment-cluster-receiver.yaml index 87caacbd7..5a31e0166 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: 
splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: 979c363f76d97a0bd2deb5d17f395a3a3fcbdc030198644ed9ed6b60be8f4e64 + checksum/config: beda1c4d66ee049ca6ac56a57faab5831c4108eb760853740bfe9782ceb235fb spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/admission-webhooks/operator-webhook-with-cert-manager.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/admission-webhooks/operator-webhook-with-cert-manager.yaml index 53234db23..bb97b3048 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/admission-webhooks/operator-webhook-with-cert-manager.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/admission-webhooks/operator-webhook-with-cert-manager.yaml @@ -6,11 +6,12 @@ metadata: annotations: cert-manager.io/inject-ca-from: default/default-operator-serving-cert labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + app.kubernetes.io/component: webhook name: default-operator-mutation webhooks: @@ -43,15 +44,15 @@ webhooks: service: name: default-operator-webhook namespace: default - path: /mutate-opentelemetry-io-v1alpha1-opentelemetrycollector + path: /mutate-opentelemetry-io-v1beta1-opentelemetrycollector port: 443 failurePolicy: 
Fail - name: mopentelemetrycollector.kb.io + name: mopentelemetrycollectorbeta.kb.io rules: - apiGroups: - opentelemetry.io apiVersions: - - v1alpha1 + - v1beta1 operations: - CREATE - UPDATE @@ -77,7 +78,6 @@ webhooks: - v1 operations: - CREATE - - UPDATE resources: - pods scope: Namespaced @@ -91,11 +91,12 @@ metadata: annotations: cert-manager.io/inject-ca-from: default/default-operator-serving-cert labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + app.kubernetes.io/component: webhook name: default-operator-validation webhooks: @@ -150,15 +151,15 @@ webhooks: service: name: default-operator-webhook namespace: default - path: /validate-opentelemetry-io-v1alpha1-opentelemetrycollector + path: /validate-opentelemetry-io-v1beta1-opentelemetrycollector port: 443 failurePolicy: Fail - name: vopentelemetrycollectorcreateupdate.kb.io + name: vopentelemetrycollectorcreateupdatebeta.kb.io rules: - apiGroups: - opentelemetry.io apiVersions: - - v1alpha1 + - v1beta1 operations: - CREATE - UPDATE @@ -173,15 +174,15 @@ webhooks: service: name: default-operator-webhook namespace: default - path: /validate-opentelemetry-io-v1alpha1-opentelemetrycollector + path: /validate-opentelemetry-io-v1beta1-opentelemetrycollector port: 443 failurePolicy: Ignore - name: vopentelemetrycollectordelete.kb.io + name: vopentelemetrycollectordeletebeta.kb.io rules: - apiGroups: - opentelemetry.io apiVersions: - - v1alpha1 + - v1beta1 operations: - DELETE resources: diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/admission-webhooks/operator-webhook.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/admission-webhooks/operator-webhook.yaml new file mode 100644 index 000000000..e6448b181 --- /dev/null +++ 
b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/admission-webhooks/operator-webhook.yaml @@ -0,0 +1,12102 @@ +--- +# Source: splunk-otel-collector/charts/operator/templates/admission-webhooks/operator-webhook.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: default/default-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + labels: + app.kubernetes.io/name: opentelemetry-operator + name: opampbridges.opentelemetry.io +spec: + group: opentelemetry.io + names: + kind: OpAMPBridge + listKind: OpAMPBridgeList + plural: opampbridges + singular: opampbridge + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: OpenTelemetry Version + jsonPath: .status.version + name: Version + type: string + - jsonPath: .spec.endpoint + name: Endpoint + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: 
object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: 
object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + 
required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + 
required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + capabilities: + additionalProperties: + type: boolean + type: object + componentsAllowed: + additionalProperties: + items: + type: string + type: array + type: object + endpoint: + type: string + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + 
type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + headers: + additionalProperties: + type: string + type: object + hostNetwork: + type: boolean + image: + type: string + imagePullPolicy: + type: string + ipFamilies: + items: + type: string + type: array + ipFamilyPolicy: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + podAnnotations: + additionalProperties: + type: string + type: object + podDnsConfig: + properties: + nameservers: + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + podSecurityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + ports: 
+ items: + properties: + appProtocol: + type: string + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-type: atomic + priorityClassName: + type: string + replicas: + format: int32 + maximum: 1 + type: integer + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + 
seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + upgradeStrategy: + enum: + - automatic + - none + type: string + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-type: atomic + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: 
integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + 
fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + 
type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - 
operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + 
x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + 
volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-type: atomic + required: + - capabilities + - endpoint + type: object + status: + properties: + version: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null +--- +# Source: splunk-otel-collector/charts/operator/templates/admission-webhooks/operator-webhook.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: default/default-operator-serving-cert + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + labels: + app.kubernetes.io/name: opentelemetry-operator + name: opentelemetrycollectors.opentelemetry.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: default-operator-webhook + namespace: default + path: /convert + port: 443 + + conversionReviewVersions: + - v1alpha1 + - v1beta1 + group: opentelemetry.io + names: + kind: OpenTelemetryCollector + listKind: OpenTelemetryCollectorList + plural: opentelemetrycollectors + shortNames: + - otelcol + - otelcols + singular: opentelemetrycollector + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Deployment Mode + jsonPath: .spec.mode + name: Mode + type: string + - description: OpenTelemetry Version + jsonPath: .status.version + name: Version + type: string + - jsonPath: .status.scale.statusReplicas + name: Ready + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.image + name: Image + type: string + - description: Management State + jsonPath: .spec.managementState + name: Management + type: string + deprecated: true + deprecationWarning: OpenTelemetryCollector v1alpha1 is deprecated. Migrate to + v1beta1. 
+ name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + additionalContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + 
properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: 
string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + 
required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + 
runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: 
+ type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: 
+ type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string 
+ values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + 
type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + args: + additionalProperties: + type: string + type: object + autoscaler: + properties: + 
behavior: + properties: + scaleDown: + properties: + policies: + items: + properties: + periodSeconds: + format: int32 + type: integer + type: + type: string + value: + format: int32 + type: integer + required: + - periodSeconds + - type + - value + type: object + type: array + x-kubernetes-list-type: atomic + selectPolicy: + type: string + stabilizationWindowSeconds: + format: int32 + type: integer + type: object + scaleUp: + properties: + policies: + items: + properties: + periodSeconds: + format: int32 + type: integer + type: + type: string + value: + format: int32 + type: integer + required: + - periodSeconds + - type + - value + type: object + type: array + x-kubernetes-list-type: atomic + selectPolicy: + type: string + stabilizationWindowSeconds: + format: int32 + type: integer + type: object + type: object + maxReplicas: + format: int32 + type: integer + metrics: + items: + properties: + pods: + properties: + metric: + properties: + name: + type: string + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + target: + properties: + averageUtilization: + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + type: string + value: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + 
required: + - metric + - target + type: object + type: + type: string + required: + - type + type: object + type: array + minReplicas: + format: int32 + type: integer + targetCPUUtilization: + format: int32 + type: integer + targetMemoryUtilization: + format: int32 + type: integer + type: object + config: + type: string + configmaps: + items: + properties: + mountpath: + type: string + name: + type: string + required: + - mountpath + - name + type: object + type: array + deploymentUpdateStrategy: + properties: + rollingUpdate: + properties: + maxSurge: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: + type: string + type: object + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + 
type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + hostNetwork: + type: boolean + image: + type: string + imagePullPolicy: + type: string + ingress: + properties: + annotations: + additionalProperties: + type: string + type: object + hostname: + type: string + ingressClassName: + type: string + route: + properties: + termination: + enum: + - insecure + - edge + - passthrough + - reencrypt + type: string + type: object + ruleType: + enum: + - path + - subdomain + type: string + tls: + items: + properties: + hosts: + items: + type: string + type: array + x-kubernetes-list-type: atomic + secretName: + type: string + type: object + type: array + type: + enum: + - ingress + - route + type: string + type: object + initContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + 
default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: 
integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + 
x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + 
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + 
scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + 
type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + managementState: + default: managed + enum: + - managed + - unmanaged + type: string + maxReplicas: + format: int32 + type: integer + minReplicas: + format: int32 + type: integer + mode: + enum: + - daemonset + - deployment + - sidecar + - statefulset + type: string + nodeSelector: + additionalProperties: + type: string + type: object + observability: + properties: + metrics: + properties: + DisablePrometheusAnnotations: + type: boolean + enableMetrics: + type: boolean + type: object + type: object + podAnnotations: + 
additionalProperties: + type: string + type: object + podDisruptionBudget: + properties: + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + minAvailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + podSecurityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + ports: + items: + properties: + appProtocol: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-type: atomic + priorityClassName: + type: string + replicas: + format: int32 + type: integer + resources: + properties: + 
claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + shareProcessNamespace: + type: boolean + targetAllocator: + properties: + affinity: + properties: + nodeAffinity: + properties: + 
preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + 
required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + 
required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + 
matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + allocationStrategy: + default: consistent-hashing + enum: + - least-weighted + - consistent-hashing + - per-node + type: string + enabled: + type: boolean + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + filterStrategy: + default: relabel-config + type: string + image: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + observability: + properties: + metrics: + properties: + DisablePrometheusAnnotations: + type: boolean + enableMetrics: + type: boolean + type: object + type: object + podDisruptionBudget: + properties: + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + minAvailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + podSecurityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + 
x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + prometheusCR: + properties: + enabled: + type: boolean + podMonitorSelector: + additionalProperties: + type: string + type: object + scrapeInterval: + default: 30s + format: duration + type: string + serviceMonitorSelector: + additionalProperties: + type: string + type: object + type: object + replicas: + format: int32 + type: integer + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + 
type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + 
operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + updateStrategy: + properties: + rollingUpdate: + properties: + maxSurge: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: + type: string + type: object + upgradeStrategy: + enum: + - automatic + - none + type: string + volumeClaimTemplates: + items: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + 
anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + status: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + allocatedResourceStatuses: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + allocatedResources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + conditions: + items: + properties: + lastProbeTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + 
status: + type: string + type: + type: string + required: + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + currentVolumeAttributesClassName: + type: string + modifyVolumeStatus: + properties: + status: + type: string + targetVolumeAttributesClassName: + type: string + required: + - status + type: object + phase: + type: string + type: object + type: object + type: array + x-kubernetes-list-type: atomic + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-type: atomic + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + 
type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + 
metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + 
type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + 
server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + 
storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-type: atomic + required: + - config + - managementState + type: object + status: + properties: + image: + type: string + messages: + items: + type: string + type: array + x-kubernetes-list-type: atomic + replicas: + format: int32 + type: integer + scale: + properties: + replicas: + format: int32 + type: integer + selector: + type: string + statusReplicas: + type: string + type: object + version: + type: string + type: object + type: object + served: true + storage: false + subresources: + scale: + labelSelectorPath: .status.scale.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.scale.replicas + status: {} + - additionalPrinterColumns: + - description: Deployment Mode + jsonPath: .spec.mode + name: Mode + type: string + - description: OpenTelemetry Version + jsonPath: .status.version + name: Version + type: string + - jsonPath: .status.scale.statusReplicas + name: Ready + type: string + - jsonPath: .metadata.creationTimestamp + 
name: Age + type: date + - jsonPath: .status.image + name: Image + type: string + - description: Management State + jsonPath: .spec.managementState + name: Management + type: string + name: v1beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + additionalContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: 
object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: 
atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + 
x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string 
+ type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: 
boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - 
operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + 
requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + 
type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey 
+ type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + args: + additionalProperties: + type: string + type: object + autoscaler: + properties: + behavior: + properties: + scaleDown: + properties: + policies: + items: + properties: + periodSeconds: + format: int32 + type: integer + type: + type: string + value: + format: int32 + type: integer + required: + - periodSeconds + - type + - value + type: object + type: array + x-kubernetes-list-type: atomic + selectPolicy: + type: string + stabilizationWindowSeconds: + format: int32 + type: integer + type: object + scaleUp: + properties: + policies: + items: + properties: + periodSeconds: + format: int32 + type: integer + type: + type: string + value: + format: int32 + type: integer + required: + - periodSeconds + - type + - value + type: object + type: array + x-kubernetes-list-type: atomic + selectPolicy: + type: string + stabilizationWindowSeconds: + format: int32 + type: integer + type: object + type: object + maxReplicas: + format: int32 + type: integer + metrics: + items: + properties: + pods: + properties: + metric: + properties: + name: + type: string + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + target: + properties: + averageUtilization: + format: int32 + type: integer + averageValue: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: + type: string + value: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - type + type: object + required: + - metric + - target + type: object + type: + type: string + required: + - type + type: object + type: array + minReplicas: + format: int32 + type: integer + targetCPUUtilization: + format: int32 + type: integer + targetMemoryUtilization: + format: int32 + type: integer + type: object + config: + properties: + connectors: + type: object + x-kubernetes-preserve-unknown-fields: true + exporters: + type: object + x-kubernetes-preserve-unknown-fields: true + extensions: + type: object + x-kubernetes-preserve-unknown-fields: true + processors: + type: object + x-kubernetes-preserve-unknown-fields: true + receivers: + type: object + x-kubernetes-preserve-unknown-fields: true + service: + properties: + extensions: + items: + type: string + type: array + pipelines: + additionalProperties: + properties: + exporters: + items: + type: string + type: array + processors: + items: + type: string + type: array + receivers: + items: + type: string + type: array + required: + - exporters + - receivers + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + telemetry: + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - pipelines + type: object + required: + - exporters + - receivers + - service + type: object + x-kubernetes-preserve-unknown-fields: true + configVersions: + default: 3 + minimum: 1 + type: integer + configmaps: + items: + properties: + mountpath: + type: string + name: + type: string + required: + - mountpath + - name + type: object + type: array + daemonSetUpdateStrategy: + properties: + rollingUpdate: + properties: + maxSurge: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: + 
type: string + type: object + deploymentUpdateStrategy: + properties: + rollingUpdate: + properties: + maxSurge: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: + type: string + type: object + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + hostNetwork: + type: boolean + image: + type: string + imagePullPolicy: + type: string + ingress: + properties: + annotations: + additionalProperties: + type: string + type: object + hostname: + type: string + 
ingressClassName: + type: string + route: + properties: + termination: + enum: + - insecure + - edge + - passthrough + - reencrypt + type: string + type: object + ruleType: + enum: + - path + - subdomain + type: string + tls: + items: + properties: + hosts: + items: + type: string + type: array + x-kubernetes-list-type: atomic + secretName: + type: string + type: object + type: array + type: + enum: + - ingress + - route + type: string + type: object + initContainers: + items: + properties: + args: + items: + type: string + type: array + x-kubernetes-list-type: atomic + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: 
string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + x-kubernetes-list-type: atomic + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + 
properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + default: TCP + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: 
+ type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + resizePolicy: + items: + properties: + resourceName: + type: string + restartPolicy: + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + restartPolicy: + type: string + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + 
items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + 
timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - devicePath + x-kubernetes-list-type: map + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-map-keys: + - mountPath + x-kubernetes-list-type: map + workingDir: + type: string + required: + - name + type: object + type: array + ipFamilies: + items: + type: string + type: array + ipFamilyPolicy: + default: SingleStack + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + 
x-kubernetes-list-type: atomic + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + sleep: + properties: + seconds: + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + managementState: + default: managed + enum: + - managed + - unmanaged + type: string + mode: + enum: + - daemonset + - deployment + - sidecar + - statefulset + type: string + nodeSelector: + additionalProperties: + type: string + type: object + observability: + properties: + metrics: + properties: + disablePrometheusAnnotations: + type: boolean + enableMetrics: + type: boolean + type: object + type: object + podAnnotations: + additionalProperties: + type: string + type: object + podDisruptionBudget: + properties: + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + minAvailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + podDnsConfig: + properties: + nameservers: + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + items: + properties: + 
name: + type: string + value: + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + podSecurityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + ports: + items: + properties: + appProtocol: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + default: TCP + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + x-kubernetes-list-type: atomic + priorityClassName: + type: string + readinessProbe: + properties: + failureThreshold: + format: int32 + type: integer + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + 
format: int32 + type: integer + successThreshold: + format: int32 + type: integer + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object + replicas: + format: int32 + type: integer + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + 
type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + shareProcessNamespace: + type: boolean + targetAllocator: + properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: 
+ items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + 
type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + 
weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + allocationStrategy: + default: consistent-hashing + enum: + - least-weighted + - consistent-hashing + - per-node + type: string + enabled: + type: boolean + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string 
+ required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + filterStrategy: + default: relabel-config + enum: + - "" + - relabel-config + type: string + image: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + observability: + properties: + metrics: + properties: + disablePrometheusAnnotations: + type: boolean + enableMetrics: + type: boolean + type: object + type: object + podDisruptionBudget: + properties: + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + minAvailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + podSecurityContext: + properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + 
type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + prometheusCR: + properties: + enabled: + type: boolean + podMonitorSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + scrapeInterval: + default: 30s + format: duration + type: string + serviceMonitorSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + type: object + replicas: + format: int32 + type: integer + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + 
additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + capabilities: + properties: + add: + items: + type: string + type: array + x-kubernetes-list-type: atomic + drop: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + hostProcess: + type: boolean + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + 
type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + format: int32 + type: integer + minDomains: + format: int32 + type: integer + nodeAffinityPolicy: + type: string + nodeTaintsPolicy: + type: string + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + upgradeStrategy: + enum: + - automatic + - none + type: string + volumeClaimTemplates: + items: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: 
+ type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + status: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + allocatedResourceStatuses: + additionalProperties: + type: string + type: object + x-kubernetes-map-type: granular + allocatedResources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + conditions: + items: + properties: + lastProbeTime: + format: date-time + type: string + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + currentVolumeAttributesClassName: + type: string + modifyVolumeStatus: + properties: + status: + type: string + targetVolumeAttributesClassName: + type: string + required: + - status + type: object + phase: + type: string + type: object + type: object + type: array + x-kubernetes-list-type: atomic + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + recursiveReadOnly: + type: string + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + x-kubernetes-list-type: atomic + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + default: ext4 + type: string + kind: + type: string + readOnly: + default: false + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean 
+ secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + selector: + 
properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + type: string + volumeAttributesClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: object + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + image: + properties: + pullPolicy: + type: string + reference: + type: 
string + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + default: default + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: + properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + 
path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + default: /etc/ceph/keyring + type: string + monitors: + items: + type: string + type: array + 
x-kubernetes-list-type: atomic + pool: + default: rbd + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + user: + default: admin + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + default: xfs + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + type: boolean + storageMode: + default: ThinProvisioned + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + default: "" + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + x-kubernetes-list-type: atomic + required: + - config + - managementState + type: object + status: + properties: + image: + type: string + scale: + properties: + replicas: + format: int32 + type: integer + selector: + type: string + statusReplicas: + type: string + type: object + version: + type: string + type: object + type: 
object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.scale.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.scale.replicas + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null +--- +# Source: splunk-otel-collector/charts/operator/templates/admission-webhooks/operator-webhook.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + labels: + app.kubernetes.io/name: opentelemetry-operator + name: instrumentations.opentelemetry.io +spec: + group: opentelemetry.io + names: + kind: Instrumentation + listKind: InstrumentationList + plural: instrumentations + shortNames: + - otelinst + - otelinsts + singular: instrumentation + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.exporter.endpoint + name: Endpoint + type: string + - jsonPath: .spec.sampler.type + name: Sampler + type: string + - jsonPath: .spec.sampler.argument + name: Sampler Arg + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + apacheHttpd: + properties: + attrs: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + configPath: + type: string + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + type: string + resourceRequirements: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + version: + type: string + volumeLimitSize: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + defaults: + properties: + useLabelsForResourceAttributes: + type: boolean + type: object + dotnet: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + type: string + resourceRequirements: + properties: + claims: + items: + properties: + name: + type: 
string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + volumeLimitSize: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + 
exporter: + properties: + endpoint: + type: string + type: object + go: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + type: string + resourceRequirements: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + volumeLimitSize: + anyOf: + - type: integer + - 
type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + java: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + extensions: + items: + properties: + dir: + type: string + image: + type: string + required: + - dir + - image + type: object + type: array + image: + type: string + resources: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - 
type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + volumeLimitSize: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + nginx: + properties: + attrs: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + configFile: + type: string + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + 
x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + type: string + resourceRequirements: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + volumeLimitSize: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + nodejs: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + 
fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + type: string + resourceRequirements: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + volumeLimitSize: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + propagators: + items: + enum: + - tracecontext + - baggage + - b3 + - b3multi + - jaeger + - xray + - ottrace + - none + type: string + type: array + python: + properties: + env: + items: + 
properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + image: + type: string + resourceRequirements: + properties: + claims: + items: + properties: + name: + type: string + request: + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + volumeLimitSize: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + resource: + properties: + addK8sUIDAttributes: + type: boolean + resourceAttributes: + additionalProperties: + type: string + type: object + type: object + sampler: + properties: + argument: + type: string + type: + enum: + - always_on + - always_off + - traceidratio + - parentbased_always_on + - parentbased_always_off + - parentbased_traceidratio + - jaeger_remote + - xray + type: string + type: object + type: object + status: + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/certmanager.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/certmanager.yaml index e45f44154..46d53d3a0 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/certmanager.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/certmanager.yaml @@ -7,11 +7,12 @@ metadata: helm.sh/hook: post-install,post-upgrade helm.sh/hook-weight: "1" labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + app.kubernetes.io/component: webhook name: default-operator-serving-cert namespace: default @@ -35,11 +36,12 @@ metadata: helm.sh/hook: post-install,post-upgrade helm.sh/hook-weight: "1" labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm 
app.kubernetes.io/instance: default + app.kubernetes.io/component: webhook name: default-operator-selfsigned-issuer namespace: default diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/clusterrole.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/clusterrole.yaml index 33bcf7cd1..ab3b32ef0 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/clusterrole.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/clusterrole.yaml @@ -4,11 +4,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + app.kubernetes.io/component: controller-manager name: default-operator-manager rules: @@ -59,6 +60,7 @@ rules: - watch - apiGroups: - apps + - extensions resources: - replicasets verbs: @@ -77,6 +79,14 @@ rules: - patch - update - watch + - apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch - apiGroups: - config.openshift.io resources: @@ -213,11 +223,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + app.kubernetes.io/component: controller-manager name: default-operator-metrics rules: @@ -231,11 +242,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" 
app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + app.kubernetes.io/component: controller-manager name: default-operator-proxy rules: diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/clusterrolebinding.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/clusterrolebinding.yaml index 5887d21ab..6e50c893c 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/clusterrolebinding.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/clusterrolebinding.yaml @@ -4,11 +4,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + app.kubernetes.io/component: controller-manager name: default-operator-manager roleRef: @@ -25,11 +26,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + app.kubernetes.io/component: controller-manager name: default-operator-proxy roleRef: diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/deployment.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/deployment.yaml index a4171e951..e6dd3433f 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/deployment.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/deployment.yaml @@ -4,11 +4,12 @@ apiVersion: apps/v1 kind: Deployment 
metadata: labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + app.kubernetes.io/component: controller-manager name: default-operator namespace: default @@ -33,13 +34,13 @@ spec: - --enable-leader-election - --health-probe-addr=:8081 - --webhook-port=9443 - - --collector-image=otel/opentelemetry-collector-contrib:0.95.0 + - --collector-image=quay.io/signalfx/splunk-otel-collector:0.110.0 command: - /manager env: - name: ENABLE_WEBHOOKS value: "true" - image: "ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator:0.95.0" + image: "ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator:0.110.0" name: manager ports: - containerPort: 8080 @@ -75,9 +76,8 @@ spec: - args: - --secure-listen-address=0.0.0.0:8443 - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - --v=0 - image: "quay.io/brancz/kube-rbac-proxy:v0.15.0" + image: "quay.io/brancz/kube-rbac-proxy:v0.18.1" name: kube-rbac-proxy ports: - containerPort: 8443 diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml index bf47da93c..693f210c5 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/instrumentation.yaml @@ -7,90 +7,73 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector 
component: otel-operator - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-operator + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "5" spec: exporter: - endpoint: http://$(SPLUNK_OTEL_AGENT):4317 + endpoint: http://default-splunk-otel-collector-agent.default.svc.cluster.local:4317 propagators: - tracecontext - baggage - b3 env: - - name: SPLUNK_OTEL_AGENT - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.hostIP apacheHttpd: image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-apache-httpd:1.0.4 env: - name: OTEL_RESOURCE_ATTRIBUTES value: splunk.zc.method=autoinstrumentation-apache-httpd:1.0.4 dotnet: - image: ghcr.io/signalfx/splunk-otel-dotnet/splunk-otel-dotnet:v1.6.0 + image: ghcr.io/signalfx/splunk-otel-dotnet/splunk-otel-dotnet:v1.8.0 env: - name: OTEL_DOTNET_AUTO_PLUGINS value: "Splunk.OpenTelemetry.AutoInstrumentation.Plugin,Splunk.OpenTelemetry.AutoInstrumentation" - name: OTEL_RESOURCE_ATTRIBUTES - value: splunk.zc.method=splunk-otel-dotnet:v1.6.0 - - name: SPLUNK_OTEL_AGENT - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.hostIP + value: splunk.zc.method=splunk-otel-dotnet:v1.8.0 # dotnet auto-instrumentation uses http/proto by default, so data must be sent to 4318 instead of 4317. 
# See: https://github.com/open-telemetry/opentelemetry-operator#opentelemetry-auto-instrumentation-injection - name: OTEL_EXPORTER_OTLP_ENDPOINT - value: http://$(SPLUNK_OTEL_AGENT):4318 + value: http://default-splunk-otel-collector-agent.default.svc.cluster.local:4318 go: image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-go:v0.10.1-alpha env: - name: OTEL_RESOURCE_ATTRIBUTES value: splunk.zc.method=autoinstrumentation-go:v0.10.1-alpha java: - image: ghcr.io/signalfx/splunk-otel-java/splunk-otel-java:v2.7.0 + image: ghcr.io/signalfx/splunk-otel-java/splunk-otel-java:v2.10.0 env: - name: OTEL_RESOURCE_ATTRIBUTES - value: splunk.zc.method=splunk-otel-java:v2.7.0 - - name: SPLUNK_OTEL_AGENT - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.hostIP + value: splunk.zc.method=splunk-otel-java:v2.10.0 # java auto-instrumentation uses http/proto by default, so data must be sent to 4318 instead of 4317. # See: https://github.com/open-telemetry/opentelemetry-operator#opentelemetry-auto-instrumentation-injection - name: OTEL_EXPORTER_OTLP_ENDPOINT - value: http://$(SPLUNK_OTEL_AGENT):4318 + value: http://default-splunk-otel-collector-agent.default.svc.cluster.local:4318 nginx: image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-apache-httpd:1.0.4 env: - name: OTEL_RESOURCE_ATTRIBUTES value: splunk.zc.method=autoinstrumentation-apache-httpd:1.0.4 nodejs: - image: ghcr.io/signalfx/splunk-otel-js/splunk-otel-js:v2.11.0 + image: ghcr.io/signalfx/splunk-otel-js/splunk-otel-js:v2.15.0 env: - name: OTEL_RESOURCE_ATTRIBUTES - value: splunk.zc.method=splunk-otel-js:v2.11.0 + value: splunk.zc.method=splunk-otel-js:v2.15.0 python: image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.44b0 env: - name: OTEL_RESOURCE_ATTRIBUTES value: splunk.zc.method=autoinstrumentation-python:0.44b0 - - name: SPLUNK_OTEL_AGENT - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.hostIP # python 
auto-instrumentation uses http/proto by default, so data must be sent to 4318 instead of 4317. # See: https://github.com/open-telemetry/opentelemetry-operator#opentelemetry-auto-instrumentation-injection - name: OTEL_EXPORTER_OTLP_ENDPOINT - value: http://$(SPLUNK_OTEL_AGENT):4318 + value: http://default-splunk-otel-collector-agent.default.svc.cluster.local:4318 diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/role.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/role.yaml index a2fe14c6b..5b6229441 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/role.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/role.yaml @@ -4,11 +4,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + app.kubernetes.io/component: controller-manager name: default-operator-leader-election namespace: default diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/rolebinding.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/rolebinding.yaml index 961ebf313..341743e5e 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/rolebinding.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/rolebinding.yaml @@ -4,11 +4,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: 
default + app.kubernetes.io/component: controller-manager name: default-operator-leader-election namespace: default diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/service.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/service.yaml index d687bd30a..3e2f9e001 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/service.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/service.yaml @@ -4,11 +4,12 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + app.kubernetes.io/component: controller-manager name: default-operator namespace: default @@ -31,11 +32,12 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + app.kubernetes.io/component: controller-manager name: default-operator-webhook namespace: default diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/serviceaccount.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/serviceaccount.yaml index 697cc60cf..f93689033 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/serviceaccount.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/serviceaccount.yaml @@ -6,9 +6,10 @@ metadata: name: operator namespace: default labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - 
app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + app.kubernetes.io/component: controller-manager diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/tests/test-certmanager-connection.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/tests/test-certmanager-connection.yaml index d9d44cc9b..28a5dcd8b 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/tests/test-certmanager-connection.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/tests/test-certmanager-connection.yaml @@ -6,11 +6,12 @@ metadata: name: "default-operator-cert-manager" namespace: default labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + app.kubernetes.io/component: webhook annotations: "helm.sh/hook": test diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/tests/test-service-connection.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/tests/test-service-connection.yaml index 88397e4ba..bc162f1a4 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/tests/test-service-connection.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/operator/tests/test-service-connection.yaml @@ -6,11 +6,12 @@ metadata: name: "default-operator-metrics" namespace: default labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + 
app.kubernetes.io/component: controller-manager annotations: "helm.sh/hook": test @@ -43,11 +44,12 @@ metadata: name: "default-operator-webhook" namespace: default labels: - helm.sh/chart: operator-0.49.1 + helm.sh/chart: operator-0.71.2 app.kubernetes.io/name: operator - app.kubernetes.io/version: "0.95.0" + app.kubernetes.io/version: "0.110.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default + app.kubernetes.io/component: controller-manager annotations: "helm.sh/hook": test diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/secret-splunk.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/secret-splunk.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/service-agent.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..5f693ffc0 --- /dev/null +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/service-agent.yaml @@ -0,0 +1,59 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: 
splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: fluentforward + port: 8006 + targetPort: fluentforward + protocol: TCP + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/serviceAccount.yaml b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/enable-operator-and-auto-instrumentation/rendered_manifests/serviceAccount.yaml +++ b/examples/enable-operator-and-auto-instrumentation/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 
release: default heritage: Helm diff --git a/examples/enable-persistence-queue/rendered_manifests/clusterRole.yaml b/examples/enable-persistence-queue/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/enable-persistence-queue/rendered_manifests/clusterRole.yaml +++ b/examples/enable-persistence-queue/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/enable-persistence-queue/rendered_manifests/clusterRoleBinding.yaml b/examples/enable-persistence-queue/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/enable-persistence-queue/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/enable-persistence-queue/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/enable-persistence-queue/rendered_manifests/configmap-agent.yaml b/examples/enable-persistence-queue/rendered_manifests/configmap-agent.yaml index d72b36ebc..66b408a40 100644 --- 
a/examples/enable-persistence-queue/rendered_manifests/configmap-agent.yaml +++ b/examples/enable-persistence-queue/rendered_manifests/configmap-agent.yaml @@ -7,17 +7,21 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest..signalfx.com/v3/event splunk_hec/platform_logs: disable_compression: true endpoint: CHANGEME @@ -38,7 +42,7 @@ data: storage: file_storage/persistent_queue source: kubernetes splunk_app_name: splunk-otel-collector - splunk_app_version: 0.105.5 + splunk_app_version: 0.113.0 timeout: 10s tls: insecure_skip_verify: false @@ -62,7 +66,7 @@ data: storage: file_storage/persistent_queue source: kubernetes splunk_app_name: splunk-otel-collector - splunk_app_version: 0.105.5 + splunk_app_version: 0.113.0 timeout: 10s tls: insecure_skip_verify: false @@ -86,7 +90,7 @@ data: storage: file_storage/persistent_queue source: kubernetes splunk_app_name: splunk-otel-collector - splunk_app_version: 0.105.5 + splunk_app_version: 0.113.0 timeout: 10s tls: insecure_skip_verify: false @@ -104,7 +108,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -336,10 +342,15 @@ data: endpoint: 0.0.0.0:8006 hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -361,6 
+372,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -387,7 +399,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -463,6 +475,16 @@ data: - filelog - fluentforward - otlp + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - splunk_hec/platform_metrics @@ -505,4 +527,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/enable-persistence-queue/rendered_manifests/configmap-cluster-receiver.yaml b/examples/enable-persistence-queue/rendered_manifests/configmap-cluster-receiver.yaml index c6a663b85..08ca70f27 100644 --- a/examples/enable-persistence-queue/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/enable-persistence-queue/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -36,7 +36,7 @@ data: queue_size: 1000 source: kubernetes splunk_app_name: splunk-otel-collector - splunk_app_version: 0.105.5 + splunk_app_version: 0.113.0 timeout: 10s tls: insecure_skip_verify: false @@ -136,7 +136,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -166,4 +166,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 
0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/enable-persistence-queue/rendered_manifests/daemonset.yaml b/examples/enable-persistence-queue/rendered_manifests/daemonset.yaml index 7b5619dfc..a90d19401 100644 --- a/examples/enable-persistence-queue/rendered_manifests/daemonset.yaml +++ b/examples/enable-persistence-queue/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: f1e5fbd7c1b4136ebec743dd00fa834dcb9d6d78c37eed30cff72e4fc0efe22c + checksum/config: 7e28ba091fe865d050d7328d4484a25a517e3f7937d660ab8aca68b1f7206d21 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,14 +41,21 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists initContainers: - name: migrate-checkpoint - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent command: ["/migratecheckpoint"] securityContext: @@ -118,7 +125,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: 
quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 @@ -156,24 +163,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_platform_hec_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/enable-persistence-queue/rendered_manifests/deployment-cluster-receiver.yaml b/examples/enable-persistence-queue/rendered_manifests/deployment-cluster-receiver.yaml index fe946f9eb..a39fcbcba 100644 --- a/examples/enable-persistence-queue/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/enable-persistence-queue/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: c238a58b0c8c81f81b1497dcff8d795a79788d35ee239ffafb7ad4f58dc6b09e + 
checksum/config: 31057c8fa63ede3a8e48bce75a40865619cdd244a7cc16ade3471013072fb603 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/enable-persistence-queue/rendered_manifests/secret-splunk.yaml b/examples/enable-persistence-queue/rendered_manifests/secret-splunk.yaml index 9351af226..145b7e0b7 100644 --- a/examples/enable-persistence-queue/rendered_manifests/secret-splunk.yaml +++ b/examples/enable-persistence-queue/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/enable-persistence-queue/rendered_manifests/service-agent.yaml b/examples/enable-persistence-queue/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..5f693ffc0 --- /dev/null +++ b/examples/enable-persistence-queue/rendered_manifests/service-agent.yaml @@ -0,0 +1,59 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: 
otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: fluentforward + port: 8006 + targetPort: fluentforward + protocol: TCP + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/enable-persistence-queue/rendered_manifests/serviceAccount.yaml b/examples/enable-persistence-queue/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/enable-persistence-queue/rendered_manifests/serviceAccount.yaml +++ b/examples/enable-persistence-queue/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/enable-trace-sampling/rendered_manifests/clusterRole.yaml b/examples/enable-trace-sampling/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/enable-trace-sampling/rendered_manifests/clusterRole.yaml +++ 
b/examples/enable-trace-sampling/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/enable-trace-sampling/rendered_manifests/clusterRoleBinding.yaml b/examples/enable-trace-sampling/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/enable-trace-sampling/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/enable-trace-sampling/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/enable-trace-sampling/rendered_manifests/configmap-agent.yaml b/examples/enable-trace-sampling/rendered_manifests/configmap-agent.yaml index ed4febbf2..b621f10f6 100644 --- a/examples/enable-trace-sampling/rendered_manifests/configmap-agent.yaml +++ b/examples/enable-trace-sampling/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 
app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -134,6 +142,7 @@ data: endpoint: 0.0.0.0:14250 thrift_http: endpoint: 0.0.0.0:14268 + nop: null otlp: protocols: grpc: @@ -160,7 +169,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 smartagent/signalfx-forwarder: listenAddress: 0.0.0.0:9080 type: signalfx-forwarder @@ -172,6 +181,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics/agent: exporters: - signalfx @@ -185,7 +204,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp processors: - memory_limiter - probabilistic_sampler @@ -200,4 +219,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git 
a/examples/enable-trace-sampling/rendered_manifests/daemonset.yaml b/examples/enable-trace-sampling/rendered_manifests/daemonset.yaml index 765dfca79..49e2166f9 100644 --- a/examples/enable-trace-sampling/rendered_manifests/daemonset.yaml +++ b/examples/enable-trace-sampling/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 97ca7f08dc86937b1d1caebfe51b34e776af7dcb82f56da5e06af8541e9f9811 + checksum/config: 87ed5affb87edf7ca49ed750ba43eeb6ec9c15f1a30ff507640dbdc10a819d19 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -75,7 +82,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/enable-trace-sampling/rendered_manifests/secret-splunk.yaml 
b/examples/enable-trace-sampling/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/enable-trace-sampling/rendered_manifests/secret-splunk.yaml +++ b/examples/enable-trace-sampling/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/enable-trace-sampling/rendered_manifests/service-agent.yaml b/examples/enable-trace-sampling/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..ba5000eca --- /dev/null +++ b/examples/enable-trace-sampling/rendered_manifests/service-agent.yaml @@ -0,0 +1,51 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + 
port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/enable-trace-sampling/rendered_manifests/serviceAccount.yaml b/examples/enable-trace-sampling/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/enable-trace-sampling/rendered_manifests/serviceAccount.yaml +++ b/examples/enable-trace-sampling/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/enabled-pprof-extension/rendered_manifests/clusterRole.yaml b/examples/enabled-pprof-extension/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/enabled-pprof-extension/rendered_manifests/clusterRole.yaml +++ b/examples/enabled-pprof-extension/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/enabled-pprof-extension/rendered_manifests/clusterRoleBinding.yaml 
b/examples/enabled-pprof-extension/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/enabled-pprof-extension/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/enabled-pprof-extension/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/enabled-pprof-extension/rendered_manifests/configmap-agent.yaml b/examples/enabled-pprof-extension/rendered_manifests/configmap-agent.yaml index 04d762fbf..497310d93 100644 --- a/examples/enabled-pprof-extension/rendered_manifests/configmap-agent.yaml +++ b/examples/enabled-pprof-extension/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + 
otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -36,7 +42,9 @@ data: pprof: null zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -128,10 +136,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -153,6 +166,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -179,7 +193,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -242,6 +256,16 @@ data: - zpages - pprof pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -269,7 +293,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -284,4 +308,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/enabled-pprof-extension/rendered_manifests/configmap-cluster-receiver.yaml b/examples/enabled-pprof-extension/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- a/examples/enabled-pprof-extension/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/enabled-pprof-extension/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: 
splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/enabled-pprof-extension/rendered_manifests/daemonset.yaml b/examples/enabled-pprof-extension/rendered_manifests/daemonset.yaml index f20787e75..b390b7f0f 100644 --- a/examples/enabled-pprof-extension/rendered_manifests/daemonset.yaml +++ b/examples/enabled-pprof-extension/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 30b576d2e73592acc6cfe8959fa90d23ea9a7d46bb2d454a82d39b32ad24db4c + checksum/config: 533c39b970ca066d32816a89883d8d71a47056cb8fb9e01c7f4a164e3dddc894 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - 
effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/enabled-pprof-extension/rendered_manifests/deployment-cluster-receiver.yaml b/examples/enabled-pprof-extension/rendered_manifests/deployment-cluster-receiver.yaml index 409e578cd..256cfb004 100644 --- a/examples/enabled-pprof-extension/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/enabled-pprof-extension/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector 
component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/enabled-pprof-extension/rendered_manifests/secret-splunk.yaml b/examples/enabled-pprof-extension/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/enabled-pprof-extension/rendered_manifests/secret-splunk.yaml +++ b/examples/enabled-pprof-extension/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/enabled-pprof-extension/rendered_manifests/service-agent.yaml b/examples/enabled-pprof-extension/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ b/examples/enabled-pprof-extension/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: 
splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/enabled-pprof-extension/rendered_manifests/serviceAccount.yaml b/examples/enabled-pprof-extension/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/enabled-pprof-extension/rendered_manifests/serviceAccount.yaml +++ b/examples/enabled-pprof-extension/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 
+ chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/clusterRole.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/clusterRole.yaml +++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/clusterRoleBinding.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git 
a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-agent.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-agent.yaml index f284a73a3..43a565a8d 100644 --- a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-agent.yaml +++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -41,7 +47,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -145,10 +153,15 @@ data: endpoint: 0.0.0.0:8006 hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -170,6 +183,7 @@ data: - container - pod - node + nop: 
null otlp: protocols: grpc: @@ -196,7 +210,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -273,6 +287,16 @@ data: receivers: - fluentforward - otlp + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -300,7 +324,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -315,4 +339,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-cluster-receiver.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 
8889 diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-fluentd-json.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-fluentd-json.yaml index 059fe7d9a..0f66420cf 100644 --- a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-fluentd-json.yaml +++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-fluentd-json.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-fluentd.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-fluentd.yaml index 58355c9cc..8f41037c7 100644 --- a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-fluentd.yaml +++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/configmap-fluentd.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/daemonset.yaml 
b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/daemonset.yaml index 0094bba0a..3810c3ea1 100644 --- a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/daemonset.yaml +++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm engine: fluentd @@ -33,7 +33,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 8603a27ced4e339d1370ba08e61e0e9f08cefcf10dc8fd9f3f71b59c312cee73 + checksum/config: 294cef8c6d6f4ab7e9c2c92b02f377e8ede701cc0b7942fde18617b1c615684e kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -42,11 +42,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists initContainers: - name: prepare-fluentd-config image: splunk/fluentd-hec:1.3.3 @@ -152,7 +159,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -188,24 +195,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: 
splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/deployment-cluster-receiver.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/deployment-cluster-receiver.yaml index 409e578cd..256cfb004 100644 --- a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ 
spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/secret-splunk.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/secret-splunk.yaml +++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/service-agent.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..5f693ffc0 --- /dev/null +++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/service-agent.yaml @@ -0,0 +1,59 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 
+ release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: fluentforward + port: 8006 + targetPort: fluentforward + protocol: TCP + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/serviceAccount.yaml b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/serviceAccount.yaml +++ b/examples/fluentd-multiline-logs-java-stack-traces/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/fluentd-refresh-interval/rendered_manifests/clusterRole.yaml b/examples/fluentd-refresh-interval/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- 
a/examples/fluentd-refresh-interval/rendered_manifests/clusterRole.yaml +++ b/examples/fluentd-refresh-interval/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/fluentd-refresh-interval/rendered_manifests/clusterRoleBinding.yaml b/examples/fluentd-refresh-interval/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/fluentd-refresh-interval/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/fluentd-refresh-interval/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/fluentd-refresh-interval/rendered_manifests/configmap-agent.yaml b/examples/fluentd-refresh-interval/rendered_manifests/configmap-agent.yaml index f284a73a3..43a565a8d 100644 --- a/examples/fluentd-refresh-interval/rendered_manifests/configmap-agent.yaml +++ b/examples/fluentd-refresh-interval/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - 
helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -41,7 +47,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -145,10 +153,15 @@ data: endpoint: 0.0.0.0:8006 hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -170,6 +183,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -196,7 +210,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -273,6 +287,16 @@ data: receivers: - fluentforward - otlp + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -300,7 +324,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - 
otlphttp - signalfx processors: - memory_limiter @@ -315,4 +339,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/fluentd-refresh-interval/rendered_manifests/configmap-cluster-receiver.yaml b/examples/fluentd-refresh-interval/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- a/examples/fluentd-refresh-interval/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/fluentd-refresh-interval/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/fluentd-refresh-interval/rendered_manifests/configmap-fluentd-json.yaml b/examples/fluentd-refresh-interval/rendered_manifests/configmap-fluentd-json.yaml index 059fe7d9a..0f66420cf 100644 --- a/examples/fluentd-refresh-interval/rendered_manifests/configmap-fluentd-json.yaml +++ b/examples/fluentd-refresh-interval/rendered_manifests/configmap-fluentd-json.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: 
splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: diff --git a/examples/fluentd-refresh-interval/rendered_manifests/configmap-fluentd.yaml b/examples/fluentd-refresh-interval/rendered_manifests/configmap-fluentd.yaml index 537dfa13b..adbcc3e93 100644 --- a/examples/fluentd-refresh-interval/rendered_manifests/configmap-fluentd.yaml +++ b/examples/fluentd-refresh-interval/rendered_manifests/configmap-fluentd.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: diff --git a/examples/fluentd-refresh-interval/rendered_manifests/daemonset.yaml b/examples/fluentd-refresh-interval/rendered_manifests/daemonset.yaml index 62c1af2ce..bb1942a30 100644 --- a/examples/fluentd-refresh-interval/rendered_manifests/daemonset.yaml +++ b/examples/fluentd-refresh-interval/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: 
default heritage: Helm engine: fluentd @@ -33,7 +33,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 98cec2956dd693db8fda00d979e51e7ec96e4a3656c266527bac8963c88ec94d + checksum/config: cffb91393d44138edc979f79c33175a975444fb3a7ff92240498d76bf23da4e6 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -42,11 +42,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists initContainers: - name: prepare-fluentd-config image: splunk/fluentd-hec:1.3.3 @@ -152,7 +159,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -188,24 +195,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/fluentd-refresh-interval/rendered_manifests/deployment-cluster-receiver.yaml b/examples/fluentd-refresh-interval/rendered_manifests/deployment-cluster-receiver.yaml index 
409e578cd..256cfb004 100644 --- a/examples/fluentd-refresh-interval/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/fluentd-refresh-interval/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/fluentd-refresh-interval/rendered_manifests/secret-splunk.yaml b/examples/fluentd-refresh-interval/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/fluentd-refresh-interval/rendered_manifests/secret-splunk.yaml +++ b/examples/fluentd-refresh-interval/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - 
app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/fluentd-refresh-interval/rendered_manifests/service-agent.yaml b/examples/fluentd-refresh-interval/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..5f693ffc0 --- /dev/null +++ b/examples/fluentd-refresh-interval/rendered_manifests/service-agent.yaml @@ -0,0 +1,59 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: fluentforward + port: 8006 + targetPort: fluentforward + protocol: TCP + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/fluentd-refresh-interval/rendered_manifests/serviceAccount.yaml 
b/examples/fluentd-refresh-interval/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/fluentd-refresh-interval/rendered_manifests/serviceAccount.yaml +++ b/examples/fluentd-refresh-interval/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/kubernetes-windows-nodes/rendered_manifests/clusterRole.yaml b/examples/kubernetes-windows-nodes/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/kubernetes-windows-nodes/rendered_manifests/clusterRole.yaml +++ b/examples/kubernetes-windows-nodes/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/kubernetes-windows-nodes/rendered_manifests/clusterRoleBinding.yaml b/examples/kubernetes-windows-nodes/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/kubernetes-windows-nodes/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/kubernetes-windows-nodes/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: 
default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/kubernetes-windows-nodes/rendered_manifests/configmap-agent.yaml b/examples/kubernetes-windows-nodes/rendered_manifests/configmap-agent.yaml index bab9e2227..99241178d 100644 --- a/examples/kubernetes-windows-nodes/rendered_manifests/configmap-agent.yaml +++ b/examples/kubernetes-windows-nodes/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -43,7 +49,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - 
batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -253,7 +261,11 @@ data: scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -275,6 +287,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -301,7 +314,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -379,6 +392,16 @@ data: - filelog - fluentforward - otlp + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -407,7 +430,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -422,4 +445,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/kubernetes-windows-nodes/rendered_manifests/configmap-cluster-receiver.yaml b/examples/kubernetes-windows-nodes/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- a/examples/kubernetes-windows-nodes/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/kubernetes-windows-nodes/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s 
static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/kubernetes-windows-nodes/rendered_manifests/daemonset.yaml b/examples/kubernetes-windows-nodes/rendered_manifests/daemonset.yaml index e22f902e2..750dcba98 100644 --- a/examples/kubernetes-windows-nodes/rendered_manifests/daemonset.yaml +++ b/examples/kubernetes-windows-nodes/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: f4ff6c3833bc151410474578db8459674d03f3af89ed142d63488bd34ad96890 + checksum/config: bf49f549e45d83598b52121660f4cd179d3f97f0f71dab39bf412367b621d242 kubectl.kubernetes.io/default-container: otel-collector spec: dnsPolicy: ClusterFirstWithHostNet @@ -40,11 +40,18 @@ spec: nodeSelector: kubernetes.io/os: windows tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -84,7 +91,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: 
TCP - image: quay.io/signalfx/splunk-otel-collector-windows:0.105.0 + image: quay.io/signalfx/splunk-otel-collector-windows:0.113.0 imagePullPolicy: IfNotPresent securityContext: windowsOptions: @@ -123,19 +130,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: C:\hostfs\proc - - name: HOST_SYS - value: C:\hostfs\sys - - name: HOST_ETC - value: C:\hostfs\etc - - name: HOST_VAR - value: C:\hostfs\var - - name: HOST_RUN - value: C:\hostfs\run - - name: HOST_DEV - value: C:\hostfs\dev readinessProbe: initialDelaySeconds: 60 diff --git a/examples/kubernetes-windows-nodes/rendered_manifests/deployment-cluster-receiver.yaml b/examples/kubernetes-windows-nodes/rendered_manifests/deployment-cluster-receiver.yaml index 472c3e435..f0c57db56 100644 --- a/examples/kubernetes-windows-nodes/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/kubernetes-windows-nodes/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -43,7 +43,7 @@ spec: - -command - 
.\otelcol.exe - --config=C:\\conf\relay.yaml - image: quay.io/signalfx/splunk-otel-collector-windows:0.105.0 + image: quay.io/signalfx/splunk-otel-collector-windows:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/kubernetes-windows-nodes/rendered_manifests/secret-splunk.yaml b/examples/kubernetes-windows-nodes/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/kubernetes-windows-nodes/rendered_manifests/secret-splunk.yaml +++ b/examples/kubernetes-windows-nodes/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/kubernetes-windows-nodes/rendered_manifests/service-agent.yaml b/examples/kubernetes-windows-nodes/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..5f693ffc0 --- /dev/null +++ b/examples/kubernetes-windows-nodes/rendered_manifests/service-agent.yaml @@ -0,0 +1,59 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: 
ClusterIP + ports: + - name: fluentforward + port: 8006 + targetPort: fluentforward + protocol: TCP + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/kubernetes-windows-nodes/rendered_manifests/serviceAccount.yaml b/examples/kubernetes-windows-nodes/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/kubernetes-windows-nodes/rendered_manifests/serviceAccount.yaml +++ b/examples/kubernetes-windows-nodes/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/multi-metrics/rendered_manifests/clusterRole.yaml b/examples/multi-metrics/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/multi-metrics/rendered_manifests/clusterRole.yaml +++ b/examples/multi-metrics/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - 
helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/multi-metrics/rendered_manifests/clusterRoleBinding.yaml b/examples/multi-metrics/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/multi-metrics/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/multi-metrics/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/multi-metrics/rendered_manifests/configmap-agent.yaml b/examples/multi-metrics/rendered_manifests/configmap-agent.yaml index a61b04de0..3fc8c55f8 100644 --- a/examples/multi-metrics/rendered_manifests/configmap-agent.yaml +++ b/examples/multi-metrics/rendered_manifests/configmap-agent.yaml @@ -7,17 +7,21 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default 
heritage: Helm data: relay: | exporters: + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest..signalfx.com/v3/event splunk_hec/platform_logs: disable_compression: true endpoint: CHANGEME @@ -37,7 +41,7 @@ data: queue_size: 1000 source: kubernetes splunk_app_name: splunk-otel-collector - splunk_app_version: 0.105.5 + splunk_app_version: 0.113.0 timeout: 10s tls: insecure_skip_verify: false @@ -60,7 +64,7 @@ data: queue_size: 1000 source: kubernetes splunk_app_name: splunk-otel-collector - splunk_app_version: 0.105.5 + splunk_app_version: 0.113.0 timeout: 10s tls: insecure_skip_verify: false @@ -76,7 +80,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -308,10 +314,15 @@ data: endpoint: 0.0.0.0:8006 hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -327,6 +338,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -353,7 +365,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -426,6 +438,16 @@ data: - filelog - fluentforward - otlp + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - splunk_hec/platform_metrics @@ -455,4 +477,9 @@ data: - prometheus/agent telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/multi-metrics/rendered_manifests/configmap-cluster-receiver.yaml b/examples/multi-metrics/rendered_manifests/configmap-cluster-receiver.yaml index c6a663b85..08ca70f27 100644 --- 
a/examples/multi-metrics/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/multi-metrics/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -36,7 +36,7 @@ data: queue_size: 1000 source: kubernetes splunk_app_name: splunk-otel-collector - splunk_app_version: 0.105.5 + splunk_app_version: 0.113.0 timeout: 10s tls: insecure_skip_verify: false @@ -136,7 +136,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -166,4 +166,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/multi-metrics/rendered_manifests/daemonset.yaml b/examples/multi-metrics/rendered_manifests/daemonset.yaml index 59404ee95..abe7bd829 100644 --- a/examples/multi-metrics/rendered_manifests/daemonset.yaml +++ b/examples/multi-metrics/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 
+32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 96af0d7f6f9dba3b552802d25ca1356f443c35bb3a34492873f004325a1c9fb4 + checksum/config: 5b4ddaf2cc99de360060d11ef3f956eaf152f249d2aeb62cf477602745b4cea1 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,14 +41,21 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists initContainers: - name: migrate-checkpoint - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent command: ["/migratecheckpoint"] securityContext: @@ -102,7 +109,7 @@ spec: containerPort: 9943 hostPort: 9943 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 @@ -140,24 +147,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_platform_hec_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/multi-metrics/rendered_manifests/deployment-cluster-receiver.yaml 
b/examples/multi-metrics/rendered_manifests/deployment-cluster-receiver.yaml index fe946f9eb..a39fcbcba 100644 --- a/examples/multi-metrics/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/multi-metrics/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: c238a58b0c8c81f81b1497dcff8d795a79788d35ee239ffafb7ad4f58dc6b09e + checksum/config: 31057c8fa63ede3a8e48bce75a40865619cdd244a7cc16ade3471013072fb603 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/multi-metrics/rendered_manifests/secret-splunk.yaml b/examples/multi-metrics/rendered_manifests/secret-splunk.yaml index 9351af226..145b7e0b7 100644 --- a/examples/multi-metrics/rendered_manifests/secret-splunk.yaml +++ b/examples/multi-metrics/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - 
app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/multi-metrics/rendered_manifests/service-agent.yaml b/examples/multi-metrics/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..6aff55b3b --- /dev/null +++ b/examples/multi-metrics/rendered_manifests/service-agent.yaml @@ -0,0 +1,43 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: fluentforward + port: 8006 + targetPort: fluentforward + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/multi-metrics/rendered_manifests/serviceAccount.yaml b/examples/multi-metrics/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/multi-metrics/rendered_manifests/serviceAccount.yaml +++ b/examples/multi-metrics/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + 
helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/only-logs-fluentd/rendered_manifests/clusterRole.yaml b/examples/only-logs-fluentd/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/only-logs-fluentd/rendered_manifests/clusterRole.yaml +++ b/examples/only-logs-fluentd/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/only-logs-fluentd/rendered_manifests/clusterRoleBinding.yaml b/examples/only-logs-fluentd/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/only-logs-fluentd/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/only-logs-fluentd/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: 
diff --git a/examples/only-logs-fluentd/rendered_manifests/configmap-agent.yaml b/examples/only-logs-fluentd/rendered_manifests/configmap-agent.yaml index ce4cab8e2..f9d4e24aa 100644 --- a/examples/only-logs-fluentd/rendered_manifests/configmap-agent.yaml +++ b/examples/only-logs-fluentd/rendered_manifests/configmap-agent.yaml @@ -7,17 +7,21 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -38,7 +42,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -140,6 +146,7 @@ data: receivers: fluentforward: endpoint: 0.0.0.0:8006 + nop: null otlp: protocols: grpc: @@ -166,7 +173,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -188,6 +195,16 @@ data: receivers: - fluentforward - otlp + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics/agent: exporters: - signalfx @@ -201,4 +218,9 @@ data: - prometheus/agent telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git 
a/examples/only-logs-fluentd/rendered_manifests/configmap-fluentd-json.yaml b/examples/only-logs-fluentd/rendered_manifests/configmap-fluentd-json.yaml index 059fe7d9a..0f66420cf 100644 --- a/examples/only-logs-fluentd/rendered_manifests/configmap-fluentd-json.yaml +++ b/examples/only-logs-fluentd/rendered_manifests/configmap-fluentd-json.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: diff --git a/examples/only-logs-fluentd/rendered_manifests/configmap-fluentd.yaml b/examples/only-logs-fluentd/rendered_manifests/configmap-fluentd.yaml index 21477d4ff..d5bf56d7f 100644 --- a/examples/only-logs-fluentd/rendered_manifests/configmap-fluentd.yaml +++ b/examples/only-logs-fluentd/rendered_manifests/configmap-fluentd.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: diff --git a/examples/only-logs-fluentd/rendered_manifests/daemonset.yaml b/examples/only-logs-fluentd/rendered_manifests/daemonset.yaml index e2d5195a2..c26559d2a 100644 --- a/examples/only-logs-fluentd/rendered_manifests/daemonset.yaml +++ b/examples/only-logs-fluentd/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: 
namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm engine: fluentd @@ -33,7 +33,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: de41e753261efcfe4413e289efb9c6c343cdda44d0b3bdc85aeeb9007b762e9c + checksum/config: bdc593fc6bb963542e70636338e108d880db0730ef8e3c356df5925bcebb2af6 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -42,11 +42,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists initContainers: - name: prepare-fluentd-config image: splunk/fluentd-hec:1.3.3 @@ -132,7 +139,7 @@ spec: - name: otlp-http containerPort: 4318 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/only-logs-fluentd/rendered_manifests/secret-splunk.yaml b/examples/only-logs-fluentd/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/only-logs-fluentd/rendered_manifests/secret-splunk.yaml +++ b/examples/only-logs-fluentd/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - 
helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/only-logs-fluentd/rendered_manifests/service-agent.yaml b/examples/only-logs-fluentd/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..edad6bf22 --- /dev/null +++ b/examples/only-logs-fluentd/rendered_manifests/service-agent.yaml @@ -0,0 +1,39 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: fluentforward + port: 8006 + targetPort: fluentforward + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/only-logs-fluentd/rendered_manifests/serviceAccount.yaml b/examples/only-logs-fluentd/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/only-logs-fluentd/rendered_manifests/serviceAccount.yaml +++ b/examples/only-logs-fluentd/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: 
namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/only-logs-otel/rendered_manifests/clusterRole.yaml b/examples/only-logs-otel/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/only-logs-otel/rendered_manifests/clusterRole.yaml +++ b/examples/only-logs-otel/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/only-logs-otel/rendered_manifests/clusterRoleBinding.yaml b/examples/only-logs-otel/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/only-logs-otel/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/only-logs-otel/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: 
splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/only-logs-otel/rendered_manifests/configmap-agent.yaml b/examples/only-logs-otel/rendered_manifests/configmap-agent.yaml index 23c6d5c08..a16075a82 100644 --- a/examples/only-logs-otel/rendered_manifests/configmap-agent.yaml +++ b/examples/only-logs-otel/rendered_manifests/configmap-agent.yaml @@ -7,17 +7,21 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -40,7 +44,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -275,6 +281,7 @@ data: storage: file_storage fluentforward: endpoint: 0.0.0.0:8006 + nop: null otlp: protocols: grpc: @@ -301,7 +308,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - file_storage @@ -324,6 +331,16 @@ data: - filelog - fluentforward - otlp + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics/agent: exporters: - signalfx @@ -337,4 +354,9 @@ data: - prometheus/agent telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + 
host: localhost + port: 8889 diff --git a/examples/only-logs-otel/rendered_manifests/daemonset.yaml b/examples/only-logs-otel/rendered_manifests/daemonset.yaml index 73bef61cc..ddc1d1ead 100644 --- a/examples/only-logs-otel/rendered_manifests/daemonset.yaml +++ b/examples/only-logs-otel/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: dde316a0ca68c84475b76f867a9469720ba8eb341e6a8654504572f5cf7c7111 + checksum/config: 7ae5eac37faee22fb3d54332696ec32e0ee7c02cf20a5a4e7665d26bfbfc5a2c kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,14 +41,21 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists initContainers: - name: migrate-checkpoint - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent command: ["/migratecheckpoint"] securityContext: @@ -98,7 +105,7 @@ spec: - name: otlp-http containerPort: 4318 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 
imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 diff --git a/examples/only-logs-otel/rendered_manifests/secret-splunk.yaml b/examples/only-logs-otel/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/only-logs-otel/rendered_manifests/secret-splunk.yaml +++ b/examples/only-logs-otel/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/only-logs-otel/rendered_manifests/service-agent.yaml b/examples/only-logs-otel/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..edad6bf22 --- /dev/null +++ b/examples/only-logs-otel/rendered_manifests/service-agent.yaml @@ -0,0 +1,39 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: fluentforward + port: 8006 + targetPort: fluentforward + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + selector: + 
app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/only-logs-otel/rendered_manifests/serviceAccount.yaml b/examples/only-logs-otel/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/only-logs-otel/rendered_manifests/serviceAccount.yaml +++ b/examples/only-logs-otel/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/only-logs-with-extra-file-logs/rendered_manifests/clusterRole.yaml b/examples/only-logs-with-extra-file-logs/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/only-logs-with-extra-file-logs/rendered_manifests/clusterRole.yaml +++ b/examples/only-logs-with-extra-file-logs/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/only-logs-with-extra-file-logs/rendered_manifests/clusterRoleBinding.yaml b/examples/only-logs-with-extra-file-logs/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- 
a/examples/only-logs-with-extra-file-logs/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/only-logs-with-extra-file-logs/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/only-logs-with-extra-file-logs/rendered_manifests/configmap-agent.yaml b/examples/only-logs-with-extra-file-logs/rendered_manifests/configmap-agent.yaml index 5bedf9098..1c4de8c50 100644 --- a/examples/only-logs-with-extra-file-logs/rendered_manifests/configmap-agent.yaml +++ b/examples/only-logs-with-extra-file-logs/rendered_manifests/configmap-agent.yaml @@ -7,17 +7,21 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -40,7 +44,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -268,6 +274,7 @@ data: 
storage: file_storage fluentforward: endpoint: 0.0.0.0:8006 + nop: null otlp: protocols: grpc: @@ -294,7 +301,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - file_storage @@ -317,6 +324,16 @@ data: - filelog - fluentforward - otlp + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop logs/host: exporters: - splunk_hec/o11y @@ -340,4 +357,9 @@ data: - prometheus/agent telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/only-logs-with-extra-file-logs/rendered_manifests/daemonset.yaml b/examples/only-logs-with-extra-file-logs/rendered_manifests/daemonset.yaml index 1b32c45d6..0731bbf28 100644 --- a/examples/only-logs-with-extra-file-logs/rendered_manifests/daemonset.yaml +++ b/examples/only-logs-with-extra-file-logs/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 113e79694c260f081337b5149e20291fbbc42f157e80fdf8683b5bf886dff197 + checksum/config: c32110f203c813ed884597ecfa98f6337945e80cf154d1a66dd84232c74d1508 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,14 +41,21 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: 
node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists initContainers: - name: migrate-checkpoint - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent command: ["/migratecheckpoint"] securityContext: @@ -98,7 +105,7 @@ spec: - name: otlp-http containerPort: 4318 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 diff --git a/examples/only-logs-with-extra-file-logs/rendered_manifests/secret-splunk.yaml b/examples/only-logs-with-extra-file-logs/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/only-logs-with-extra-file-logs/rendered_manifests/secret-splunk.yaml +++ b/examples/only-logs-with-extra-file-logs/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/only-logs-with-extra-file-logs/rendered_manifests/service-agent.yaml b/examples/only-logs-with-extra-file-logs/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..edad6bf22 --- /dev/null +++ b/examples/only-logs-with-extra-file-logs/rendered_manifests/service-agent.yaml @@ -0,0 +1,39 @@ +--- +# Source: 
splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: fluentforward + port: 8006 + targetPort: fluentforward + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/only-logs-with-extra-file-logs/rendered_manifests/serviceAccount.yaml b/examples/only-logs-with-extra-file-logs/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/only-logs-with-extra-file-logs/rendered_manifests/serviceAccount.yaml +++ b/examples/only-logs-with-extra-file-logs/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/only-metrics-platform/rendered_manifests/clusterRole.yaml b/examples/only-metrics-platform/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- 
a/examples/only-metrics-platform/rendered_manifests/clusterRole.yaml +++ b/examples/only-metrics-platform/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/only-metrics-platform/rendered_manifests/clusterRoleBinding.yaml b/examples/only-metrics-platform/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/only-metrics-platform/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/only-metrics-platform/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/only-metrics-platform/rendered_manifests/configmap-agent.yaml b/examples/only-metrics-platform/rendered_manifests/configmap-agent.yaml index 385dda26a..18aca72b3 100644 --- a/examples/only-metrics-platform/rendered_manifests/configmap-agent.yaml +++ b/examples/only-metrics-platform/rendered_manifests/configmap-agent.yaml @@ -7,17 +7,21 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: 
splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest..signalfx.com/v3/event splunk_hec/platform_metrics: disable_compression: true endpoint: CHANGEME @@ -36,7 +40,7 @@ data: queue_size: 1000 source: kubernetes splunk_app_name: splunk-otel-collector - splunk_app_version: 0.105.5 + splunk_app_version: 0.113.0 timeout: 10s tls: insecure_skip_verify: false @@ -49,7 +53,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -171,10 +177,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -190,6 +201,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -216,7 +228,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -273,6 +285,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - splunk_hec/platform_metrics @@ -302,4 +324,9 @@ data: - prometheus/agent telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git 
a/examples/only-metrics-platform/rendered_manifests/configmap-cluster-receiver.yaml b/examples/only-metrics-platform/rendered_manifests/configmap-cluster-receiver.yaml index c6a663b85..08ca70f27 100644 --- a/examples/only-metrics-platform/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/only-metrics-platform/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -36,7 +36,7 @@ data: queue_size: 1000 source: kubernetes splunk_app_name: splunk-otel-collector - splunk_app_version: 0.105.5 + splunk_app_version: 0.113.0 timeout: 10s tls: insecure_skip_verify: false @@ -136,7 +136,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -166,4 +166,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/only-metrics-platform/rendered_manifests/daemonset.yaml b/examples/only-metrics-platform/rendered_manifests/daemonset.yaml index 0efe599b3..386aeb854 100644 --- a/examples/only-metrics-platform/rendered_manifests/daemonset.yaml +++ b/examples/only-metrics-platform/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - 
app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: ca910388e71a4073e6934a02abfb92b7c2b25a1be94dd1dc902759dd1f6d0aeb + checksum/config: 1d48c12fc0681c6793c2c6e581e5199248d33a81165761431e28cc813cecd5c5 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -63,7 +70,7 @@ spec: containerPort: 9943 hostPort: 9943 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -99,24 +106,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_platform_hec_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git 
a/examples/only-metrics-platform/rendered_manifests/deployment-cluster-receiver.yaml b/examples/only-metrics-platform/rendered_manifests/deployment-cluster-receiver.yaml index fe946f9eb..a39fcbcba 100644 --- a/examples/only-metrics-platform/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/only-metrics-platform/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: c238a58b0c8c81f81b1497dcff8d795a79788d35ee239ffafb7ad4f58dc6b09e + checksum/config: 31057c8fa63ede3a8e48bce75a40865619cdd244a7cc16ade3471013072fb603 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/only-metrics-platform/rendered_manifests/secret-splunk.yaml b/examples/only-metrics-platform/rendered_manifests/secret-splunk.yaml index 9351af226..145b7e0b7 100644 --- a/examples/only-metrics-platform/rendered_manifests/secret-splunk.yaml +++ b/examples/only-metrics-platform/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: 
splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/only-metrics-platform/rendered_manifests/service-agent.yaml b/examples/only-metrics-platform/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..03dc55f65 --- /dev/null +++ b/examples/only-metrics-platform/rendered_manifests/service-agent.yaml @@ -0,0 +1,39 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/only-metrics-platform/rendered_manifests/serviceAccount.yaml b/examples/only-metrics-platform/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/only-metrics-platform/rendered_manifests/serviceAccount.yaml +++ b/examples/only-metrics-platform/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ 
metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/only-metrics/rendered_manifests/clusterRole.yaml b/examples/only-metrics/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/only-metrics/rendered_manifests/clusterRole.yaml +++ b/examples/only-metrics/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/only-metrics/rendered_manifests/clusterRoleBinding.yaml b/examples/only-metrics/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/only-metrics/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/only-metrics/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: 
splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/only-metrics/rendered_manifests/configmap-agent.yaml b/examples/only-metrics/rendered_manifests/configmap-agent.yaml index a0736ed85..b94daee3a 100644 --- a/examples/only-metrics/rendered_manifests/configmap-agent.yaml +++ b/examples/only-metrics/rendered_manifests/configmap-agent.yaml @@ -7,17 +7,21 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -32,7 +36,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -124,10 +130,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -143,6 +154,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -169,7 +181,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -226,6 +238,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - 
batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -253,4 +275,9 @@ data: - prometheus/agent telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/only-metrics/rendered_manifests/configmap-cluster-receiver.yaml b/examples/only-metrics/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- a/examples/only-metrics/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/only-metrics/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/only-metrics/rendered_manifests/daemonset.yaml b/examples/only-metrics/rendered_manifests/daemonset.yaml index d68b4cee2..b834592e0 100644 --- a/examples/only-metrics/rendered_manifests/daemonset.yaml +++ b/examples/only-metrics/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - 
app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: ff74075250fdf750fd6d9138f7ca47065de2bd5edcc9c25a98a23319195b745a + checksum/config: 25993deedb3b21f8c07058cba61cb0cf7bde34473b48c7e3d2dca615b5eed460 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -63,7 +70,7 @@ spec: containerPort: 9943 hostPort: 9943 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -99,24 +106,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git 
a/examples/only-metrics/rendered_manifests/deployment-cluster-receiver.yaml b/examples/only-metrics/rendered_manifests/deployment-cluster-receiver.yaml index 409e578cd..256cfb004 100644 --- a/examples/only-metrics/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/only-metrics/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/only-metrics/rendered_manifests/secret-splunk.yaml b/examples/only-metrics/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/only-metrics/rendered_manifests/secret-splunk.yaml +++ b/examples/only-metrics/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 
app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/only-metrics/rendered_manifests/service-agent.yaml b/examples/only-metrics/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..03dc55f65 --- /dev/null +++ b/examples/only-metrics/rendered_manifests/service-agent.yaml @@ -0,0 +1,39 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/only-metrics/rendered_manifests/serviceAccount.yaml b/examples/only-metrics/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/only-metrics/rendered_manifests/serviceAccount.yaml +++ b/examples/only-metrics/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: 
splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/only-traces/rendered_manifests/clusterRole.yaml b/examples/only-traces/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/only-traces/rendered_manifests/clusterRole.yaml +++ b/examples/only-traces/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/only-traces/rendered_manifests/clusterRoleBinding.yaml b/examples/only-traces/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/only-traces/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/only-traces/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git 
a/examples/only-traces/rendered_manifests/configmap-agent.yaml b/examples/only-traces/rendered_manifests/configmap-agent.yaml index aed7d7238..42f943d05 100644 --- a/examples/only-traces/rendered_manifests/configmap-agent.yaml +++ b/examples/only-traces/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -131,6 +139,7 @@ data: endpoint: 0.0.0.0:14250 thrift_http: endpoint: 0.0.0.0:14268 + nop: null otlp: protocols: grpc: @@ -157,7 +166,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 smartagent/signalfx-forwarder: listenAddress: 0.0.0.0:9080 type: signalfx-forwarder @@ -169,6 +178,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - 
memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics/agent: exporters: - signalfx @@ -182,7 +201,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp processors: - memory_limiter - k8sattributes @@ -196,4 +215,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/only-traces/rendered_manifests/daemonset.yaml b/examples/only-traces/rendered_manifests/daemonset.yaml index 3e11a4e8b..fb70e0ba2 100644 --- a/examples/only-traces/rendered_manifests/daemonset.yaml +++ b/examples/only-traces/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: f48bac0e02bcdd298e767883ee1cc5c3a35fb758f2c1e9a5e2cb39bcca7ea48f + checksum/config: 2c176facdef5a9c1b642a71cc7608bd30d1ff2bd95ee138a8b2ef96a72c385bd kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -75,7 +82,7 @@ spec: 
containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/only-traces/rendered_manifests/secret-splunk.yaml b/examples/only-traces/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/only-traces/rendered_manifests/secret-splunk.yaml +++ b/examples/only-traces/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/only-traces/rendered_manifests/service-agent.yaml b/examples/only-traces/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..ba5000eca --- /dev/null +++ b/examples/only-traces/rendered_manifests/service-agent.yaml @@ -0,0 +1,51 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - 
name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/only-traces/rendered_manifests/serviceAccount.yaml b/examples/only-traces/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/only-traces/rendered_manifests/serviceAccount.yaml +++ b/examples/only-traces/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/clusterRole.yaml b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/clusterRole.yaml +++ b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - 
app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/clusterRoleBinding.yaml b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/configmap-agent.yaml b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/configmap-agent.yaml index f47a571b0..d4c86481d 100644 --- a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/configmap-agent.yaml +++ b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/configmap-agent.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector 
- chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -22,9 +22,15 @@ data: endpoint: :4317 tls: insecure: true - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: http://:6060 @@ -39,7 +45,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -131,10 +139,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -156,6 +169,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -182,7 +196,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -247,6 +261,16 @@ data: logs: exporters: - otlp + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - otlp @@ -289,4 +313,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/configmap-cluster-receiver.yaml 
b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/configmap-cluster-receiver.yaml index 8d44d2a9c..96ba5092e 100644 --- a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/daemonset.yaml b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/daemonset.yaml index f243ed7f3..3dc54a8c6 100644 --- a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/daemonset.yaml +++ b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: 
otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: f5dbd222b32887e0bfbba8c0fdea45941498cf6343d66af4a1dbbaf5e2dc310d + checksum/config: 2b6cf7100cf13fc7b91055f8f32720a6999caf9ece7f6c834d21204ab80c02bd kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/deployment-cluster-receiver.yaml 
b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/deployment-cluster-receiver.yaml index da78aeb29..3c47cff73 100644 --- a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: 980b8fb6f15b89874da3b5165ddd95d9ad8a8e8c78d06d7b96651bc9d27e72f1 + checksum/config: 54fc5cd7eeb294eed3f035509cb18d352a86accf13563cdc91e49f2d527d523d spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/secret-splunk.yaml b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/secret-splunk.yaml +++ b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: 
namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/service-agent.yaml b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: 
splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/serviceAccount.yaml b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/route-data-through-gateway-deployed-separately/rendered_manifests/serviceAccount.yaml +++ b/examples/route-data-through-gateway-deployed-separately/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/secret-validation/rendered_manifests/clusterRole.yaml b/examples/secret-validation/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/secret-validation/rendered_manifests/clusterRole.yaml +++ b/examples/secret-validation/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/secret-validation/rendered_manifests/clusterRoleBinding.yaml 
b/examples/secret-validation/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/secret-validation/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/secret-validation/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/secret-validation/rendered_manifests/configmap-agent.yaml b/examples/secret-validation/rendered_manifests/configmap-agent.yaml index 86b59f73d..88210f8e4 100644 --- a/examples/secret-validation/rendered_manifests/configmap-agent.yaml +++ b/examples/secret-validation/rendered_manifests/configmap-agent.yaml @@ -7,17 +7,21 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -40,7 +44,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: 
exclude: @@ -240,6 +246,7 @@ data: storage: file_storage fluentforward: endpoint: 0.0.0.0:8006 + nop: null otlp: protocols: grpc: @@ -266,7 +273,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - file_storage @@ -289,6 +296,16 @@ data: - filelog - fluentforward - otlp + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics/agent: exporters: - signalfx @@ -302,4 +319,9 @@ data: - prometheus/agent telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/secret-validation/rendered_manifests/daemonset.yaml b/examples/secret-validation/rendered_manifests/daemonset.yaml index 6a4dbc5ea..e979cb909 100644 --- a/examples/secret-validation/rendered_manifests/daemonset.yaml +++ b/examples/secret-validation/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 4400535d6b3d2931d0223a165eee10034c750fe207dd810dad79b3662ee66932 + checksum/config: 6c57b882104f758c1b708ac9ee4f5e5bd133faeb8594957b7b1baacbf6d37e98 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,14 +41,21 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + 
operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists initContainers: - name: migrate-checkpoint - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent command: ["/migratecheckpoint"] securityContext: @@ -98,7 +105,7 @@ spec: - name: otlp-http containerPort: 4318 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 diff --git a/examples/secret-validation/rendered_manifests/secret-splunk-validation-hook.yaml b/examples/secret-validation/rendered_manifests/secret-splunk-validation-hook.yaml index 3e2dd1d07..ac23c0199 100644 --- a/examples/secret-validation/rendered_manifests/secret-splunk-validation-hook.yaml +++ b/examples/secret-validation/rendered_manifests/secret-splunk-validation-hook.yaml @@ -9,10 +9,10 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" annotations: "helm.sh/hook": pre-upgrade,pre-install "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded @@ -20,7 +20,7 @@ spec: restartPolicy: Never containers: - name: validate-secret - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent command: ["sh", "-c"] args: diff --git a/examples/secret-validation/rendered_manifests/service-agent.yaml b/examples/secret-validation/rendered_manifests/service-agent.yaml new file mode 
100644 index 000000000..edad6bf22 --- /dev/null +++ b/examples/secret-validation/rendered_manifests/service-agent.yaml @@ -0,0 +1,39 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: fluentforward + port: 8006 + targetPort: fluentforward + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/secret-validation/rendered_manifests/serviceAccount.yaml b/examples/secret-validation/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/secret-validation/rendered_manifests/serviceAccount.yaml +++ b/examples/secret-validation/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/splunk-enterprise-index-routing/rendered_manifests/clusterRole.yaml 
b/examples/splunk-enterprise-index-routing/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/splunk-enterprise-index-routing/rendered_manifests/clusterRole.yaml +++ b/examples/splunk-enterprise-index-routing/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/splunk-enterprise-index-routing/rendered_manifests/clusterRoleBinding.yaml b/examples/splunk-enterprise-index-routing/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/splunk-enterprise-index-routing/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/splunk-enterprise-index-routing/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/splunk-enterprise-index-routing/rendered_manifests/configmap-agent.yaml b/examples/splunk-enterprise-index-routing/rendered_manifests/configmap-agent.yaml index 769d89fae..f623ce860 100644 --- a/examples/splunk-enterprise-index-routing/rendered_manifests/configmap-agent.yaml +++ 
b/examples/splunk-enterprise-index-routing/rendered_manifests/configmap-agent.yaml @@ -7,17 +7,21 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest..signalfx.com/v3/event splunk_hec/platform_logs: disable_compression: true endpoint: http://localhost:8088/services/collector @@ -37,7 +41,7 @@ data: queue_size: 1000 source: kubernetes splunk_app_name: splunk-otel-collector - splunk_app_version: 0.105.5 + splunk_app_version: 0.113.0 timeout: 10s tls: insecure_skip_verify: false @@ -52,7 +56,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -255,6 +261,7 @@ data: storage: file_storage fluentforward: endpoint: 0.0.0.0:8006 + nop: null otlp: protocols: grpc: @@ -281,7 +288,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - file_storage @@ -304,6 +311,21 @@ data: - filelog - fluentforward - otlp + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/splunk-enterprise-index-routing/rendered_manifests/daemonset.yaml b/examples/splunk-enterprise-index-routing/rendered_manifests/daemonset.yaml index e6c6ee7cf..0d6ba0129 100644 --- 
a/examples/splunk-enterprise-index-routing/rendered_manifests/daemonset.yaml +++ b/examples/splunk-enterprise-index-routing/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: ab9ce05e17745f0a4c0cc43777ab129f3217ebf01174b19396942fdfea7e9da9 + checksum/config: 3a45f718a21ebacf0c0c7d5d892d43e4cf761adc81e7f6eba5a9e2c6b466fb4e kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,14 +41,21 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists initContainers: - name: migrate-checkpoint - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent command: ["/migratecheckpoint"] securityContext: @@ -98,7 +105,7 @@ spec: - name: otlp-http containerPort: 4318 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 diff --git a/examples/splunk-enterprise-index-routing/rendered_manifests/secret-splunk.yaml 
b/examples/splunk-enterprise-index-routing/rendered_manifests/secret-splunk.yaml index 9351af226..145b7e0b7 100644 --- a/examples/splunk-enterprise-index-routing/rendered_manifests/secret-splunk.yaml +++ b/examples/splunk-enterprise-index-routing/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/splunk-enterprise-index-routing/rendered_manifests/service-agent.yaml b/examples/splunk-enterprise-index-routing/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..edad6bf22 --- /dev/null +++ b/examples/splunk-enterprise-index-routing/rendered_manifests/service-agent.yaml @@ -0,0 +1,39 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: fluentforward + port: 8006 + targetPort: fluentforward + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + selector: + app: splunk-otel-collector + 
component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/splunk-enterprise-index-routing/rendered_manifests/serviceAccount.yaml b/examples/splunk-enterprise-index-routing/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/splunk-enterprise-index-routing/rendered_manifests/serviceAccount.yaml +++ b/examples/splunk-enterprise-index-routing/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/target-allocator/rendered_manifests/clusterRole.yaml b/examples/target-allocator/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/target-allocator/rendered_manifests/clusterRole.yaml +++ b/examples/target-allocator/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/target-allocator/rendered_manifests/clusterRoleBinding.yaml b/examples/target-allocator/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- 
a/examples/target-allocator/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/target-allocator/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/target-allocator/rendered_manifests/configmap-agent.yaml b/examples/target-allocator/rendered_manifests/configmap-agent.yaml index 2462fb4ad..cf3ae3f81 100644 --- a/examples/target-allocator/rendered_manifests/configmap-agent.yaml +++ b/examples/target-allocator/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: 
${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -127,10 +135,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -152,6 +165,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -178,7 +192,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 prometheus/crd: config: global: @@ -248,6 +262,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -275,7 +299,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -290,4 +314,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/target-allocator/rendered_manifests/configmap-cluster-receiver.yaml b/examples/target-allocator/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- a/examples/target-allocator/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/target-allocator/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: 
splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/target-allocator/rendered_manifests/daemonset.yaml b/examples/target-allocator/rendered_manifests/daemonset.yaml index 08d6fb801..00d9ae358 100644 --- a/examples/target-allocator/rendered_manifests/daemonset.yaml +++ b/examples/target-allocator/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: ce027723588acdb45daed6b94df7ffb80f96b2f5ee4abd87663e5260efb897bc + checksum/config: 5a020b1268df244ba36936ae8f5f0d3e64c0f804979402c47d071f94b1066d7e kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + 
operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/target-allocator/rendered_manifests/deployment-cluster-receiver.yaml b/examples/target-allocator/rendered_manifests/deployment-cluster-receiver.yaml index 409e578cd..256cfb004 100644 --- a/examples/target-allocator/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/target-allocator/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: 
otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/target-allocator/rendered_manifests/secret-splunk.yaml b/examples/target-allocator/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/target-allocator/rendered_manifests/secret-splunk.yaml +++ b/examples/target-allocator/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/target-allocator/rendered_manifests/service-agent.yaml b/examples/target-allocator/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ b/examples/target-allocator/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: 
default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/target-allocator/rendered_manifests/serviceAccount.yaml b/examples/target-allocator/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/target-allocator/rendered_manifests/serviceAccount.yaml +++ b/examples/target-allocator/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/use-proxy/rendered_manifests/clusterRole.yaml b/examples/use-proxy/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/use-proxy/rendered_manifests/clusterRole.yaml +++ b/examples/use-proxy/rendered_manifests/clusterRole.yaml @@ 
-6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/use-proxy/rendered_manifests/clusterRoleBinding.yaml b/examples/use-proxy/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/use-proxy/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/use-proxy/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/use-proxy/rendered_manifests/configmap-agent.yaml b/examples/use-proxy/rendered_manifests/configmap-agent.yaml index 741292002..5ae71d8b5 100644 --- a/examples/use-proxy/rendered_manifests/configmap-agent.yaml +++ b/examples/use-proxy/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: 
splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -127,10 +135,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -152,6 +165,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -178,7 +192,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 receiver_creator: receivers: smartagent/coredns: @@ -240,6 +254,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -267,7 +291,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -282,4 +306,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/use-proxy/rendered_manifests/configmap-cluster-receiver.yaml 
b/examples/use-proxy/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- a/examples/use-proxy/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/use-proxy/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/use-proxy/rendered_manifests/daemonset.yaml b/examples/use-proxy/rendered_manifests/daemonset.yaml index dffef639a..16ee8848c 100644 --- a/examples/use-proxy/rendered_manifests/daemonset.yaml +++ b/examples/use-proxy/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: 
60aa06bfcb080c769f77047ef5c7d14232089748eaa54188d6d37144b3d00e75 + checksum/config: 1d6330c88154a12a7ef72f47ff1fe38ad8ab08eb47f5116a2b16c13f735ade30 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: /hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo - name: HTTPS_PROXY value: 192.168.0.10 diff --git a/examples/use-proxy/rendered_manifests/deployment-cluster-receiver.yaml b/examples/use-proxy/rendered_manifests/deployment-cluster-receiver.yaml index 09ee28fea..f9d5cf2e5 100644 --- a/examples/use-proxy/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/use-proxy/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: 
namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git a/examples/use-proxy/rendered_manifests/secret-splunk.yaml b/examples/use-proxy/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/use-proxy/rendered_manifests/secret-splunk.yaml +++ b/examples/use-proxy/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/use-proxy/rendered_manifests/service-agent.yaml 
b/examples/use-proxy/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ b/examples/use-proxy/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 + targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/use-proxy/rendered_manifests/serviceAccount.yaml b/examples/use-proxy/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/use-proxy/rendered_manifests/serviceAccount.yaml +++ b/examples/use-proxy/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm 
app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/with-target-allocator/rendered_manifests/clusterRole.yaml b/examples/with-target-allocator/rendered_manifests/clusterRole.yaml index e91f7bd1e..f365299bd 100644 --- a/examples/with-target-allocator/rendered_manifests/clusterRole.yaml +++ b/examples/with-target-allocator/rendered_manifests/clusterRole.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm rules: diff --git a/examples/with-target-allocator/rendered_manifests/clusterRoleBinding.yaml b/examples/with-target-allocator/rendered_manifests/clusterRoleBinding.yaml index d9d5cdb93..e19e3f9a0 100644 --- a/examples/with-target-allocator/rendered_manifests/clusterRoleBinding.yaml +++ b/examples/with-target-allocator/rendered_manifests/clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git 
a/examples/with-target-allocator/rendered_manifests/configmap-agent.yaml b/examples/with-target-allocator/rendered_manifests/configmap-agent.yaml index d14dad9d8..f644d782e 100644 --- a/examples/with-target-allocator/rendered_manifests/configmap-agent.yaml +++ b/examples/with-target-allocator/rendered_manifests/configmap-agent.yaml @@ -7,20 +7,26 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: relay: | exporters: - sapm: - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} - endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace + otlphttp: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + metrics_endpoint: https://ingest.CHANGEME.signalfx.com/v2/datapoint/otlp + traces_endpoint: https://ingest.CHANGEME.signalfx.com/v2/trace/otlp + otlphttp/entities: + headers: + X-SF-Token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + logs_endpoint: https://ingest.CHANGEME.signalfx.com/v3/event signalfx: access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} api_url: https://api.CHANGEME.signalfx.com @@ -35,7 +41,9 @@ data: node: ${K8S_NODE_NAME} zpages: null processors: - batch: null + batch: + metadata_keys: + - X-SF-Token filter/logs: logs: exclude: @@ -127,10 +135,15 @@ data: receivers: hostmetrics: collection_interval: 10s + root_path: /hostfs scrapers: cpu: null disk: null - filesystem: null + filesystem: + include_mount_points: + match_type: strict + mount_points: + - / load: null memory: null network: null @@ -152,6 +165,7 @@ data: - container - pod - node + nop: null otlp: protocols: grpc: @@ -178,7 +192,7 @@ data: scrape_interval: 10s static_configs: 
- targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 prometheus/ta: config: global: @@ -248,6 +262,16 @@ data: - k8s_observer - zpages pipelines: + logs/entities: + exporters: + - otlphttp/entities + processors: + - memory_limiter + - batch + - resourcedetection + - resource + receivers: + - nop metrics: exporters: - signalfx @@ -276,7 +300,7 @@ data: - prometheus/agent traces: exporters: - - sapm + - otlphttp - signalfx processors: - memory_limiter @@ -291,4 +315,9 @@ data: - zipkin telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/with-target-allocator/rendered_manifests/configmap-cluster-receiver.yaml b/examples/with-target-allocator/rendered_manifests/configmap-cluster-receiver.yaml index 5267ca292..69e8e29f3 100644 --- a/examples/with-target-allocator/rendered_manifests/configmap-cluster-receiver.yaml +++ b/examples/with-target-allocator/rendered_manifests/configmap-cluster-receiver.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: @@ -91,7 +91,7 @@ data: scrape_interval: 10s static_configs: - targets: - - ${K8S_POD_IP}:8889 + - localhost:8889 service: extensions: - health_check @@ -119,4 +119,9 @@ data: - prometheus/k8s_cluster_receiver telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 diff --git a/examples/with-target-allocator/rendered_manifests/daemonset.yaml b/examples/with-target-allocator/rendered_manifests/daemonset.yaml index 2a84a0dd7..5ed25e80b 100644 
--- a/examples/with-target-allocator/rendered_manifests/daemonset.yaml +++ b/examples/with-target-allocator/rendered_manifests/daemonset.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-collector-agent - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm spec: @@ -32,7 +32,7 @@ spec: component: otel-collector-agent release: default annotations: - checksum/config: ce5bfb976b854d61a40ec8fe6413f9b9fe0e263a6eb8803d50ab5bf62e896713 + checksum/config: 134b3c49173c0d6dfac367908b7792d171d61a6547407cdb1a37102577920b30 kubectl.kubernetes.io/default-container: otel-collector spec: hostNetwork: true @@ -41,11 +41,18 @@ spec: nodeSelector: kubernetes.io/os: linux tolerations: - - effect: NoSchedule key: node-role.kubernetes.io/master + operator: Exists - effect: NoSchedule key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: kubernetes.io/system-node + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/infra + operator: Exists containers: - name: otel-collector command: @@ -79,7 +86,7 @@ spec: containerPort: 9411 hostPort: 9411 protocol: TCP - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB @@ -115,24 +122,6 @@ spec: secretKeyRef: name: default-splunk-otel-collector key: splunk_observability_access_token - # Env variables for host metrics receiver - - name: HOST_PROC - value: /hostfs/proc - - name: HOST_SYS - value: /hostfs/sys - - name: HOST_ETC - value: /hostfs/etc - - name: HOST_VAR - value: 
/hostfs/var - - name: HOST_RUN - value: /hostfs/run - - name: HOST_DEV - value: /hostfs/dev - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo readinessProbe: httpGet: diff --git a/examples/with-target-allocator/rendered_manifests/deployment-cluster-receiver.yaml b/examples/with-target-allocator/rendered_manifests/deployment-cluster-receiver.yaml index 409e578cd..256cfb004 100644 --- a/examples/with-target-allocator/rendered_manifests/deployment-cluster-receiver.yaml +++ b/examples/with-target-allocator/rendered_manifests/deployment-cluster-receiver.yaml @@ -7,13 +7,13 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector component: otel-k8s-cluster-receiver - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm app.kubernetes.io/component: otel-k8s-cluster-receiver @@ -31,7 +31,7 @@ spec: component: otel-k8s-cluster-receiver release: default annotations: - checksum/config: b3af0a34b5e344447d0ee02fd0477bb8f57cfb689bee60b44dbae23de5e6d7ff + checksum/config: ff5ed57ffd6eaabc34c61f909a53c3ead6cf13539461c69ae410cc955d467a21 spec: serviceAccountName: default-splunk-otel-collector nodeSelector: @@ -41,7 +41,7 @@ spec: command: - /otelcol - --config=/conf/relay.yaml - image: quay.io/signalfx/splunk-otel-collector:0.105.0 + image: quay.io/signalfx/splunk-otel-collector:0.113.0 imagePullPolicy: IfNotPresent env: - name: SPLUNK_MEMORY_TOTAL_MIB diff --git 
a/examples/with-target-allocator/rendered_manifests/secret-splunk.yaml b/examples/with-target-allocator/rendered_manifests/secret-splunk.yaml index ccea1b5dd..c550912dc 100644 --- a/examples/with-target-allocator/rendered_manifests/secret-splunk.yaml +++ b/examples/with-target-allocator/rendered_manifests/secret-splunk.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm type: Opaque diff --git a/examples/with-target-allocator/rendered_manifests/service-agent.yaml b/examples/with-target-allocator/rendered_manifests/service-agent.yaml new file mode 100644 index 000000000..95c885c81 --- /dev/null +++ b/examples/with-target-allocator/rendered_manifests/service-agent.yaml @@ -0,0 +1,55 @@ +--- +# Source: splunk-otel-collector/templates/service-agent.yaml +apiVersion: v1 +kind: Service +metadata: + name: default-splunk-otel-collector-agent + namespace: default + labels: + app.kubernetes.io/name: splunk-otel-collector + helm.sh/chart: splunk-otel-collector-0.113.0 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: default + app.kubernetes.io/version: "0.113.0" + app: splunk-otel-collector + component: otel-collector-agent + chart: splunk-otel-collector-0.113.0 + release: default + heritage: Helm + app.kubernetes.io/component: otel-collector-agent +spec: + type: ClusterIP + ports: + - name: jaeger-grpc + port: 14250 + targetPort: jaeger-grpc + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: jaeger-thrift + protocol: TCP + - name: otlp + port: 4317 + targetPort: otlp + protocol: TCP + - name: otlp-http + port: 4318 
+ targetPort: otlp-http + protocol: TCP + - name: sfx-forwarder + port: 9080 + targetPort: sfx-forwarder + protocol: TCP + - name: signalfx + port: 9943 + targetPort: signalfx + protocol: TCP + - name: zipkin + port: 9411 + targetPort: zipkin + protocol: TCP + selector: + app: splunk-otel-collector + component: otel-collector-agent + release: default + internalTrafficPolicy: Local diff --git a/examples/with-target-allocator/rendered_manifests/serviceAccount.yaml b/examples/with-target-allocator/rendered_manifests/serviceAccount.yaml index 9c5bd56f4..2a5a436cd 100644 --- a/examples/with-target-allocator/rendered_manifests/serviceAccount.yaml +++ b/examples/with-target-allocator/rendered_manifests/serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/examples/with-target-allocator/rendered_manifests/targetAllocator-clusterRoleBinding.yaml b/examples/with-target-allocator/rendered_manifests/targetAllocator-clusterRoleBinding.yaml index 1fcc9735e..8e8c4a6e6 100644 --- a/examples/with-target-allocator/rendered_manifests/targetAllocator-clusterRoleBinding.yaml +++ b/examples/with-target-allocator/rendered_manifests/targetAllocator-clusterRoleBinding.yaml @@ -6,12 +6,12 @@ metadata: name: default-splunk-otel-collector-ta-clusterRoleBinding labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: 
"0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm roleRef: diff --git a/examples/with-target-allocator/rendered_manifests/targetAllocator-configmap.yaml b/examples/with-target-allocator/rendered_manifests/targetAllocator-configmap.yaml index 5dfe9eb24..d1b5a1da3 100644 --- a/examples/with-target-allocator/rendered_manifests/targetAllocator-configmap.yaml +++ b/examples/with-target-allocator/rendered_manifests/targetAllocator-configmap.yaml @@ -7,12 +7,12 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm data: diff --git a/examples/with-target-allocator/rendered_manifests/targetAllocator-serviceAccount.yaml b/examples/with-target-allocator/rendered_manifests/targetAllocator-serviceAccount.yaml index b0fa519c0..48d87a2c2 100644 --- a/examples/with-target-allocator/rendered_manifests/targetAllocator-serviceAccount.yaml +++ b/examples/with-target-allocator/rendered_manifests/targetAllocator-serviceAccount.yaml @@ -8,11 +8,11 @@ metadata: namespace: default labels: app.kubernetes.io/name: splunk-otel-collector - helm.sh/chart: splunk-otel-collector-0.105.5 + helm.sh/chart: splunk-otel-collector-0.113.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: default - app.kubernetes.io/version: "0.105.0" + app.kubernetes.io/version: "0.113.0" app: splunk-otel-collector - chart: splunk-otel-collector-0.105.5 + chart: splunk-otel-collector-0.113.0 release: default heritage: Helm diff --git a/functional_tests/common.go b/functional_tests/common.go index 
d53260c49..53d67803c 100644 --- a/functional_tests/common.go +++ b/functional_tests/common.go @@ -71,6 +71,26 @@ func waitForMetrics(t *testing.T, entriesNum int, mc *consumertest.MetricsSink) len(mc.AllMetrics()), timeoutMinutes) } +func checkNoEventsReceived(t *testing.T, lc *consumertest.LogsSink) { + require.True(t, len(lc.AllLogs()) == 0, + "received %d logs, expected 0 logs", len(lc.AllLogs())) +} + +func checkNoMetricsReceived(t *testing.T, lc *consumertest.MetricsSink) { + require.True(t, len(lc.AllMetrics()) == 0, + "received %d metrics, expected 0 metrics", len(lc.AllMetrics())) +} + +func resetMetricsSink(t *testing.T, mc *consumertest.MetricsSink) { + mc.Reset() + t.Logf("Metrics sink reset, current metrics: %d", len(mc.AllMetrics())) +} + +func resetLogsSink(t *testing.T, lc *consumertest.LogsSink) { + lc.Reset() + t.Logf("Logs sink reset, current logs: %d", len(lc.AllLogs())) +} + func writeNewExpectedTracesResult(t *testing.T, file string, trace *ptrace.Traces) { require.NoError(t, os.MkdirAll("results", 0755)) require.NoError(t, golden.WriteTraces(t, filepath.Join("results", filepath.Base(file)), *trace)) diff --git a/functional_tests/configuration_switching_test.go b/functional_tests/configuration_switching_test.go new file mode 100644 index 000000000..3cf5c8f6e --- /dev/null +++ b/functional_tests/configuration_switching_test.go @@ -0,0 +1,498 @@ +// Copyright Splunk Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +//go:build configuration_switching + +package functional_tests + +import ( + "bytes" + "context" + "fmt" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver/receivertest" + "gopkg.in/yaml.v3" + "os" + "path/filepath" + "strings" + "sync" + "testing" + "text/template" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/consumer/consumertest" + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/kube" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/client-go/tools/clientcmd" + + "github.com/signalfx/splunk-otel-collector-chart/functional_tests/internal" +) + +const ( + hecReceiverPort = 8090 + hecMetricsReceiverPort = 8091 + apiPort = 8881 + hecLogsObjectsReceiverPort = 8092 + testDir = "testdata_configuration_switching" + valuesDir = "values" +) + +var globalSinks *sinks + +var setupRun = sync.Once{} + +type sinks struct { + logsConsumer *consumertest.LogsSink + hecMetricsConsumer *consumertest.MetricsSink + logsObjectsConsumer *consumertest.LogsSink +} + +func setupOnce(t *testing.T) *sinks { + setupRun.Do(func() { + // create an API server + internal.CreateApiServer(t, apiPort) + // set ingest pipelines + logs, metrics := setupHEC(t) + globalSinks = &sinks{ + logsConsumer: logs, + hecMetricsConsumer: metrics, + logsObjectsConsumer: setupHECLogsObjects(t), + } + if os.Getenv("TEARDOWN_BEFORE_SETUP") == "true" { + teardown(t) + } + }) + return globalSinks +} + +func deployChartsAndApps(t *testing.T, valuesFileName string, repl map[string]interface{}) { + testKubeConfig, 
setKubeConfig := os.LookupEnv("KUBECONFIG") + require.True(t, setKubeConfig, "the environment variable KUBECONFIG must be set") + kubeConfig, err := clientcmd.BuildConfigFromFlags("", testKubeConfig) + require.NoError(t, err) + client, err := kubernetes.NewForConfig(kubeConfig) + require.NoError(t, err) + + chartPath := filepath.Join("..", "helm-charts", "splunk-otel-collector") + chart, err := loader.Load(chartPath) + require.NoError(t, err) + + var valuesBytes []byte + valuesBytes, err = os.ReadFile(filepath.Join(testDir, valuesDir, valuesFileName)) + require.NoError(t, err) + + hostEp := hostEndpoint(t) + if len(hostEp) == 0 { + require.Fail(t, "Host endpoint not found") + } + replacements := map[string]interface{}{ + "LogHecEndpoint": fmt.Sprintf("http://%s:%d", hostEp, hecReceiverPort), + "MetricHecEndpoint": fmt.Sprintf("http://%s:%d/services/collector", hostEp, hecMetricsReceiverPort), + } + for k, v := range repl { + replacements[k] = v + } + + tmpl, err := template.New("").Parse(string(valuesBytes)) + require.NoError(t, err) + var buf bytes.Buffer + err = tmpl.Execute(&buf, replacements) + require.NoError(t, err) + var values map[string]interface{} + err = yaml.Unmarshal(buf.Bytes(), &values) + require.NoError(t, err) + + actionConfig := new(action.Configuration) + if err := actionConfig.Init(kube.GetConfig(testKubeConfig, "", "default"), "default", os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) { + t.Logf(format+"\n", v...) 
+ }); err != nil { + require.NoError(t, err) + } + install := action.NewInstall(actionConfig) + install.Namespace = "default" + install.ReleaseName = "sock" + _, err = install.Run(chart, values) + if err != nil { + t.Logf("error reported during helm install: %v\n", err) + retryUpgrade := action.NewUpgrade(actionConfig) + retryUpgrade.Namespace = "default" + retryUpgrade.Install = true + _, err = retryUpgrade.Run("sock", chart, values) + require.NoError(t, err) + } + + waitForAllDeploymentsToStart(t, client) + t.Log("Deployments started") + + t.Cleanup(func() { + if os.Getenv("SKIP_TEARDOWN") == "true" { + t.Log("Skipping teardown as SKIP_TEARDOWN is set to true") + return + } + t.Log("Cleaning up cluster") + teardown(t) + + }) + +} +func teardown(t *testing.T) { + t.Log("Running teardown") + uninstallDeployment(t) +} + +func waitForAllDeploymentsToStart(t *testing.T, client *kubernetes.Clientset) { + require.Eventually(t, func() bool { + di, err := client.AppsV1().Deployments("default").List(context.Background(), metav1.ListOptions{}) + require.NoError(t, err) + for _, d := range di.Items { + if d.Status.ReadyReplicas != d.Status.Replicas { + var messages string + for _, c := range d.Status.Conditions { + messages += c.Message + messages += "\n" + } + + t.Logf("Deployment not ready: %s, %s", d.Name, messages) + return false + } + } + return true + }, 5*time.Minute, 10*time.Second) +} + +func Test_Functions(t *testing.T) { + _ = setupOnce(t) + if os.Getenv("SKIP_TESTS") == "true" { + t.Log("Skipping tests as SKIP_TESTS is set to true") + return + } + + t.Run("agent logs and metrics enabled or disabled", testAgentLogsAndMetrics) + t.Run("logs and metrics index switch", testIndexSwitch) + t.Run("cluster receiver enabled or disabled", testClusterReceiverEnabledOrDisabled) + +} + +func testAgentLogsAndMetrics(t *testing.T) { + + valuesFileName := "values_logs_and_metrics_switching.yaml.tmpl" + hecMetricsConsumer := setupOnce(t).hecMetricsConsumer + agentLogsConsumer := 
setupOnce(t).logsConsumer + + t.Run("check logs and metrics received when both are enabled", func(t *testing.T) { + resetLogsSink(t, agentLogsConsumer) + resetMetricsSink(t, hecMetricsConsumer) + + checkNoMetricsReceived(t, hecMetricsConsumer) + checkNoEventsReceived(t, agentLogsConsumer) + + replacements := map[string]interface{}{ + "MetricsEnabled": true, + "LogsEnabled": true, + } + deployChartsAndApps(t, valuesFileName, replacements) + + waitForMetrics(t, 5, hecMetricsConsumer) + waitForLogs(t, 5, agentLogsConsumer) + uninstallDeployment(t) + }) + + t.Run("check metrics only enabled", func(t *testing.T) { + resetLogsSink(t, agentLogsConsumer) + resetMetricsSink(t, hecMetricsConsumer) + + checkNoMetricsReceived(t, hecMetricsConsumer) + checkNoEventsReceived(t, agentLogsConsumer) + + replacements := map[string]interface{}{ + "MetricsEnabled": true, + "LogsEnabled": false, + } + deployChartsAndApps(t, valuesFileName, replacements) + + waitForMetrics(t, 5, hecMetricsConsumer) + checkNoEventsReceived(t, agentLogsConsumer) + uninstallDeployment(t) + }) + + t.Run("check logs only enabled", func(t *testing.T) { + resetLogsSink(t, agentLogsConsumer) + resetMetricsSink(t, hecMetricsConsumer) + + replacements := map[string]interface{}{ + "MetricsEnabled": false, + "LogsEnabled": true, + } + deployChartsAndApps(t, valuesFileName, replacements) + + waitForLogs(t, 5, agentLogsConsumer) + uninstallDeployment(t) + resetLogsSink(t, agentLogsConsumer) + resetMetricsSink(t, hecMetricsConsumer) + }) +} + +func testIndexSwitch(t *testing.T) { + var metricsIndex string = "metricsIndex" + var newMetricsIndex string = "newMetricsIndex" + var logsIndex string = "main" + var newLogsIndex string = "newLogsIndex" + var nonDefaultSourcetype = "my-sourcetype" + + valuesFileName := "values_indexes_switching.yaml.tmpl" + hecMetricsConsumer := setupOnce(t).hecMetricsConsumer + checkNoMetricsReceived(t, hecMetricsConsumer) + agentLogsConsumer := setupOnce(t).logsConsumer + 
checkNoEventsReceived(t, agentLogsConsumer) + + t.Run("check logs and metrics index switching", func(t *testing.T) { + replacements := map[string]interface{}{ + "MetricsIndex": metricsIndex, + "LogsIndex": logsIndex, + } + deployChartsAndApps(t, valuesFileName, replacements) + + waitForMetrics(t, 3, hecMetricsConsumer) + waitForLogs(t, 3, agentLogsConsumer) + + var sourcetypes []string + var indices []string + logs := agentLogsConsumer.AllLogs() + sourcetypes, indices = getLogsIndexAndSourceType(logs) + assert.True(t, len(sourcetypes) > 1) // we are also receiving logs from other kind containers + assert.Contains(t, sourcetypes, "kube:container:kindnet-cni") + assert.True(t, len(indices) == 1) + assert.True(t, indices[0] == logsIndex) + + var mIndices []string + mIndices = getMetricsIndex(hecMetricsConsumer.AllMetrics()) + assert.True(t, len(mIndices) == 1) + assert.True(t, mIndices[0] == metricsIndex) + + replacements = map[string]interface{}{ + "MetricsIndex": newMetricsIndex, + "LogsIndex": newLogsIndex, + "NonDefaultSourcetype": true, + "Sourcetype": nonDefaultSourcetype, + } + deployChartsAndApps(t, valuesFileName, replacements) + resetLogsSink(t, agentLogsConsumer) + resetMetricsSink(t, hecMetricsConsumer) + + waitForMetrics(t, 3, hecMetricsConsumer) + waitForLogs(t, 3, agentLogsConsumer) + logs = agentLogsConsumer.AllLogs() + sourcetypes, indices = getLogsIndexAndSourceType(logs) + t.Logf("Indices: %v", indices) + assert.Contains(t, indices, newLogsIndex) + assert.Contains(t, sourcetypes, nonDefaultSourcetype) + assert.True(t, len(indices) == 1) + assert.True(t, len(sourcetypes) == 1) + mIndices = getMetricsIndex(hecMetricsConsumer.AllMetrics()) + assert.True(t, len(mIndices) == 1) + assert.True(t, mIndices[0] == newMetricsIndex) + }) + uninstallDeployment(t) + resetLogsSink(t, agentLogsConsumer) + resetMetricsSink(t, hecMetricsConsumer) +} + +func testClusterReceiverEnabledOrDisabled(t *testing.T) { + valuesFileName := 
"values_cluster_receiver_switching.yaml.tmpl" + namespace := "default" + logsObjectsConsumer := setupOnce(t).logsObjectsConsumer + hostEp := hostEndpoint(t) + if len(hostEp) == 0 { + require.Fail(t, "Host endpoint not found") + } + logsObjectsHecEndpoint := fmt.Sprintf("http://%s:%d/services/collector", hostEp, hecLogsObjectsReceiverPort) + + t.Run("check cluster receiver enabled", func(t *testing.T) { + replacements := map[string]interface{}{ + "ClusterReceiverEnabled": false, + "LogObjectsHecEndpoint": logsObjectsHecEndpoint, + } + deployChartsAndApps(t, valuesFileName, replacements) + var pods *corev1.PodList + pods = listPodsInNamespace(t, namespace) + assert.True(t, len(pods.Items) == 1) + assert.True(t, strings.HasPrefix(pods.Items[0].Name, "sock-splunk-otel-collector-agent")) + checkNoEventsReceived(t, logsObjectsConsumer) + + t.Log("cluster receiver enabled") + replacements = map[string]interface{}{ + "ClusterReceiverEnabled": true, + "LogObjectsHecEndpoint": logsObjectsHecEndpoint, + } + deployChartsAndApps(t, valuesFileName, replacements) + resetLogsSink(t, logsObjectsConsumer) + + pods = listPodsInNamespace(t, namespace) + assert.True(t, len(pods.Items) == 2) + assert.True(t, checkPodExists(pods, "sock-splunk-otel-collector-agent")) + assert.True(t, checkPodExists(pods, "sock-splunk-otel-collector-k8s-cluster-receiver")) + waitForLogs(t, 5, logsObjectsConsumer) + }) + uninstallDeployment(t) + resetLogsSink(t, logsObjectsConsumer) +} + +func checkPodExists(pods *corev1.PodList, podNamePrefix string) bool { + for _, pod := range pods.Items { + if strings.HasPrefix(pod.Name, podNamePrefix) { + return true + } + } + return false +} + +func listPodsInNamespace(t *testing.T, namespace string) *corev1.PodList { + testKubeConfig, setKubeConfig := os.LookupEnv("KUBECONFIG") + require.True(t, setKubeConfig, "the environment variable KUBECONFIG must be set") + kubeConfig, err := clientcmd.BuildConfigFromFlags("", testKubeConfig) + require.NoError(t, err) + client, 
err := kubernetes.NewForConfig(kubeConfig) + require.NoError(t, err) + + // Get the list of pods in the specified namespace + pods, err := client.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{}) + require.NoError(t, err) + t.Logf("There are %d pods in the namespace %q\n", len(pods.Items), namespace) + return pods +} + +func waitForAllPodsToBeRemoved(t *testing.T, namespace string) { + timeoutMinutes := 2 + require.Eventuallyf(t, func() bool { + return len(listPodsInNamespace(t, namespace).Items) == 0 + }, time.Duration(timeoutMinutes)*time.Minute, 5*time.Second, "There are still %d pods in the namespace", len(listPodsInNamespace(t, namespace).Items)) +} + +func getLogsIndexAndSourceType(logs []plog.Logs) ([]string, []string) { + var sourcetypes []string + var indices []string + + for i := 0; i < len(logs); i++ { + l := logs[i] + for j := 0; j < l.ResourceLogs().Len(); j++ { + rl := l.ResourceLogs().At(j) + if value, ok := rl.Resource().Attributes().Get("com.splunk.sourcetype"); ok { + sourcetype := value.AsString() + // check if sourcetype is already in the list + if !contains(sourcetypes, sourcetype) { + sourcetypes = append(sourcetypes, sourcetype) + } + } + if value, ok := rl.Resource().Attributes().Get("com.splunk.index"); ok { + index := value.AsString() + // check if index is already in the list + if !contains(indices, index) { + indices = append(indices, index) + } + } + } + } + return sourcetypes, indices +} + +// get metrics index from metrics +func getMetricsIndex(metrics []pmetric.Metrics) []string { + var indices []string + for i := 0; i < len(metrics); i++ { + m := metrics[i] + fmt.Printf("Metrics: %v", m.ResourceMetrics().At(0).Resource().Attributes()) + if value, ok := m.ResourceMetrics().At(0).Resource().Attributes().Get("com.splunk.index"); ok { + index := value.AsString() + if !contains(indices, index) { + indices = append(indices, index) + } + } + } + return indices +} + +func contains(list []string, newValue string) bool 
{ + for _, v := range list { + if v == newValue { + return true + } + } + return false +} + +func uninstallDeployment(t *testing.T) { + testKubeConfig, setKubeConfig := os.LookupEnv("KUBECONFIG") + require.True(t, setKubeConfig, "the environment variable KUBECONFIG must be set") + actionConfig := new(action.Configuration) + if err := actionConfig.Init(kube.GetConfig(testKubeConfig, "", "default"), "default", os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) { + t.Logf(format+"\n", v...) + }); err != nil { + require.NoError(t, err) + } + + uninstall := action.NewUninstall(actionConfig) + uninstallResponse, err := uninstall.Run("sock") + if err != nil { + t.Logf("Failed to uninstall release: %v", err) + } + t.Logf("Uninstalled release: %v", uninstallResponse) + waitForAllPodsToBeRemoved(t, "default") +} + +func setupHEC(t *testing.T) (*consumertest.LogsSink, *consumertest.MetricsSink) { + // the splunkhecreceiver does poorly at receiving logs and metrics. Use separate ports for now. 
+ f := splunkhecreceiver.NewFactory() + cfg := f.CreateDefaultConfig().(*splunkhecreceiver.Config) + cfg.Endpoint = fmt.Sprintf("0.0.0.0:%d", hecReceiverPort) + + mCfg := f.CreateDefaultConfig().(*splunkhecreceiver.Config) + mCfg.Endpoint = fmt.Sprintf("0.0.0.0:%d", hecMetricsReceiverPort) + + lc := new(consumertest.LogsSink) + mc := new(consumertest.MetricsSink) + rcvr, err := f.CreateLogs(context.Background(), receivertest.NewNopSettings(), cfg, lc) + mrcvr, err := f.CreateMetrics(context.Background(), receivertest.NewNopSettings(), mCfg, mc) + require.NoError(t, err) + + require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost())) + require.NoError(t, err, "failed creating logs receiver") + t.Cleanup(func() { + assert.NoError(t, rcvr.Shutdown(context.Background())) + }) + + require.NoError(t, mrcvr.Start(context.Background(), componenttest.NewNopHost())) + require.NoError(t, err, "failed creating metrics receiver") + t.Cleanup(func() { + assert.NoError(t, mrcvr.Shutdown(context.Background())) + }) + + return lc, mc +} + +func setupHECLogsObjects(t *testing.T) *consumertest.LogsSink { + f := splunkhecreceiver.NewFactory() + cfg := f.CreateDefaultConfig().(*splunkhecreceiver.Config) + cfg.Endpoint = fmt.Sprintf("0.0.0.0:%d", hecLogsObjectsReceiverPort) + + lc := new(consumertest.LogsSink) + rcvr, err := f.CreateLogs(context.Background(), receivertest.NewNopSettings(), cfg, lc) + require.NoError(t, err) + + require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost())) + require.NoError(t, err, "failed creating logs receiver") + t.Cleanup(func() { + assert.NoError(t, rcvr.Shutdown(context.Background())) + }) + + return lc +} diff --git a/functional_tests/functional_test.go b/functional_tests/functional_test.go index 6ed3c26f1..bca0fb2b9 100644 --- a/functional_tests/functional_test.go +++ b/functional_tests/functional_test.go @@ -9,10 +9,6 @@ import ( "bytes" "context" "fmt" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 
- "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/dynamic" "os" "path/filepath" "regexp" @@ -47,7 +43,11 @@ import ( appextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" k8sruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" @@ -69,6 +69,7 @@ const ( eksTestKubeEnv = "eks" autopilotTestKubeEnv = "gke/autopilot" aksTestKubeEnv = "aks" + gceTestKubeEnv = "gce" testDir = "testdata" valuesDir = "values" manifestsDir = "manifests" @@ -532,7 +533,7 @@ func Test_Functions(t *testing.T) { require.True(t, setKubeTestEnv, "the environment variable KUBE_TEST_ENV must be set") switch kubeTestEnv { - case kindTestKubeEnv, autopilotTestKubeEnv, aksTestKubeEnv: + case kindTestKubeEnv, autopilotTestKubeEnv, aksTestKubeEnv, gceTestKubeEnv: expectedValuesDir = kindValuesDir case eksTestKubeEnv: expectedValuesDir = eksValuesDir @@ -700,8 +701,6 @@ func testDotNetTraces(t *testing.T) { selectedTrace = &trace break } - selectedTrace = &trace - break } } return selectedTrace != nil @@ -723,6 +722,7 @@ func testDotNetTraces(t *testing.T) { ptracetest.IgnoreResourceAttributeValue("k8s.pod.uid"), ptracetest.IgnoreResourceAttributeValue("k8s.replicaset.name"), ptracetest.IgnoreResourceAttributeValue("os.version"), + ptracetest.IgnoreResourceAttributeValue("os.build_id"), ptracetest.IgnoreResourceAttributeValue("host.arch"), ptracetest.IgnoreResourceAttributeValue("telemetry.distro.version"), ptracetest.IgnoreResourceAttributeValue("telemetry.sdk.version"), @@ -1045,10 +1045,6 @@ func testAgentMetrics(t *testing.T) { 
"otelcol_exporter_sent_spans", "otelcol_otelsvc_k8s_ip_lookup_miss", "otelcol_processor_accepted_spans", - "otelcol_processor_dropped_spans", - "otelcol_processor_refused_spans", - "otelcol_processor_refused_log_records", - "otelcol_processor_dropped_log_records", "otelcol_processor_accepted_log_records", "otelcol_exporter_queue_size", "otelcol_exporter_sent_metric_points", @@ -1066,8 +1062,6 @@ func testAgentMetrics(t *testing.T) { "otelcol_processor_accepted_metric_points", "otelcol_processor_filter_logs_filtered", "otelcol_receiver_accepted_metric_points", - "otelcol_processor_dropped_metric_points", - "otelcol_processor_refused_metric_points", "otelcol_receiver_accepted_log_records", "otelcol_receiver_refused_log_records", "otelcol_receiver_refused_metric_points", @@ -1121,7 +1115,7 @@ func testAgentMetrics(t *testing.T) { replaceWithStar := func(string) string { return "*" } - selectedInternalMetrics := selectMetricSet(expectedInternalMetrics, "otelcol_process_runtime_total_alloc_bytes", agentMetricsConsumer, false) + selectedInternalMetrics := selectMetricSet(expectedInternalMetrics, "otelcol_process_runtime_total_alloc_bytes", agentMetricsConsumer, true) if selectedInternalMetrics == nil { t.Skip("No metric batch identified with the right metric count, exiting") return @@ -1134,8 +1128,8 @@ func testAgentMetrics(t *testing.T) { pmetrictest.IgnoreMetricAttributeValue("container.id", metricNames...), pmetrictest.IgnoreMetricAttributeValue("k8s.daemonset.uid", metricNames...), pmetrictest.IgnoreMetricAttributeValue("k8s.deployment.uid", metricNames...), - pmetrictest.IgnoreMetricAttributeValue("k8s.pod.uid", metricNames...), - pmetrictest.IgnoreMetricAttributeValue("k8s.pod.name", metricNames...), + pmetrictest.IgnoreMetricAttributeValue("k8s.pod.uid"), + pmetrictest.IgnoreMetricAttributeValue("k8s.pod.name"), pmetrictest.IgnoreMetricAttributeValue("k8s.replicaset.uid", metricNames...), pmetrictest.IgnoreMetricAttributeValue("k8s.replicaset.name", 
metricNames...), pmetrictest.IgnoreMetricAttributeValue("k8s.namespace.uid", metricNames...), @@ -1143,11 +1137,11 @@ func testAgentMetrics(t *testing.T) { pmetrictest.IgnoreMetricAttributeValue("container.image.tag", metricNames...), pmetrictest.IgnoreMetricAttributeValue("k8s.node.uid", metricNames...), pmetrictest.IgnoreMetricAttributeValue("net.host.name", metricNames...), - pmetrictest.IgnoreMetricAttributeValue("service.instance.id", metricNames...), - pmetrictest.IgnoreMetricAttributeValue("service_instance_id", metricNames...), + pmetrictest.IgnoreMetricAttributeValue("service.instance.id"), + pmetrictest.IgnoreMetricAttributeValue("service_instance_id"), pmetrictest.IgnoreMetricAttributeValue("service_version", metricNames...), pmetrictest.IgnoreMetricAttributeValue("receiver", metricNames...), - pmetrictest.IgnoreMetricValues(metricNames...), + pmetrictest.IgnoreMetricValues(), pmetrictest.ChangeResourceAttributeValue("k8s.deployment.name", shortenNames), pmetrictest.ChangeResourceAttributeValue("k8s.pod.name", shortenNames), pmetrictest.ChangeResourceAttributeValue("k8s.replicaset.name", shortenNames), @@ -1168,6 +1162,7 @@ func testAgentMetrics(t *testing.T) { pmetrictest.IgnoreMetricsOrder(), pmetrictest.IgnoreScopeMetricsOrder(), pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreSubsequentDataPoints("otelcol_receiver_accepted_log_records", "otelcol_receiver_refused_log_records"), ) if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" { writeNewExpectedMetricsResult(t, expectedInternalMetricsFile, selectedInternalMetrics) @@ -1225,10 +1220,8 @@ func testAgentMetrics(t *testing.T) { ) if err != nil && os.Getenv("UPDATE_EXPECTED_RESULTS") == "true" { writeNewExpectedMetricsResult(t, expectedKubeletStatsMetricsFile, selectedKubeletstatsMetrics) - t.Skipf("we have trouble identifying exact payloads right now: %v", err) - } else { - assert.NoError(t, err) } + assert.NoError(t, err) } func testPrometheusAnnotationMetrics(t *testing.T) 
{ @@ -1325,8 +1318,6 @@ func testHECMetrics(t *testing.T) { "otelcol_exporter_sent_metric_points", "otelcol_exporter_sent_log_records", "otelcol_otelsvc_k8s_ip_lookup_miss", - "otelcol_processor_refused_log_records", - "otelcol_processor_dropped_log_records", "otelcol_processor_accepted_log_records", "otelcol_otelsvc_k8s_namespace_added", "otelcol_otelsvc_k8s_pod_added", @@ -1339,8 +1330,6 @@ func testHECMetrics(t *testing.T) { "otelcol_process_runtime_total_sys_memory_bytes", "otelcol_process_uptime", "otelcol_processor_accepted_metric_points", - "otelcol_processor_dropped_metric_points", - "otelcol_processor_refused_metric_points", "otelcol_receiver_accepted_metric_points", "otelcol_receiver_refused_metric_points", "otelcol_scraper_errored_metric_points", @@ -1413,7 +1402,7 @@ func setupTraces(t *testing.T) *consumertest.TracesSink { cfg.Protocols.GRPC.NetAddr.Endpoint = fmt.Sprintf("0.0.0.0:%d", otlpReceiverPort) cfg.Protocols.HTTP.Endpoint = fmt.Sprintf("0.0.0.0:%d", otlpHTTPReceiverPort) - rcvr, err := f.CreateTracesReceiver(context.Background(), receivertest.NewNopSettings(), cfg, tc) + rcvr, err := f.CreateTraces(context.Background(), receivertest.NewNopSettings(), cfg, tc) require.NoError(t, err) require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost())) @@ -1431,7 +1420,7 @@ func setupSignalfxReceiver(t *testing.T, port int) *consumertest.MetricsSink { cfg := f.CreateDefaultConfig().(*signalfxreceiver.Config) cfg.Endpoint = fmt.Sprintf("0.0.0.0:%d", port) - rcvr, err := f.CreateMetricsReceiver(context.Background(), receivertest.NewNopSettings(), cfg, mc) + rcvr, err := f.CreateMetrics(context.Background(), receivertest.NewNopSettings(), cfg, mc) require.NoError(t, err) require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost())) @@ -1454,8 +1443,8 @@ func setupHEC(t *testing.T) (*consumertest.LogsSink, *consumertest.MetricsSink) lc := new(consumertest.LogsSink) mc := new(consumertest.MetricsSink) - rcvr, err := 
f.CreateLogsReceiver(context.Background(), receivertest.NewNopSettings(), cfg, lc) - mrcvr, err := f.CreateMetricsReceiver(context.Background(), receivertest.NewNopSettings(), mCfg, mc) + rcvr, err := f.CreateLogs(context.Background(), receivertest.NewNopSettings(), cfg, lc) + mrcvr, err := f.CreateMetrics(context.Background(), receivertest.NewNopSettings(), mCfg, mc) require.NoError(t, err) require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost())) @@ -1479,7 +1468,7 @@ func setupHECLogsObjects(t *testing.T) *consumertest.LogsSink { cfg.Endpoint = fmt.Sprintf("0.0.0.0:%d", hecLogsObjectsReceiverPort) lc := new(consumertest.LogsSink) - rcvr, err := f.CreateLogsReceiver(context.Background(), receivertest.NewNopSettings(), cfg, lc) + rcvr, err := f.CreateLogs(context.Background(), receivertest.NewNopSettings(), cfg, lc) require.NoError(t, err) require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost())) @@ -1491,11 +1480,6 @@ func setupHECLogsObjects(t *testing.T) *consumertest.LogsSink { return lc } -type dimensionFilter struct { - key string - value string -} - func checkMetricsAreEmitted(t *testing.T, mc *consumertest.MetricsSink, metricNames []string, matchFn func(string, pcommon.Map) bool) { metricsToFind := map[string]bool{} for _, name := range metricNames { diff --git a/functional_tests/go.mod b/functional_tests/go.mod index 4d8e03426..f2f534d3e 100644 --- a/functional_tests/go.mod +++ b/functional_tests/go.mod @@ -8,46 +8,49 @@ go 1.22.0 toolchain go1.22.5 require ( - github.com/docker/docker v27.1.1+incompatible - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.106.1 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.106.2-0.20240808194002-5374bbac3595 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.106.1 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.106.1 - 
github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/collector/component v0.106.1 - go.opentelemetry.io/collector/consumer/consumertest v0.106.1 - go.opentelemetry.io/collector/pdata v1.12.0 - go.opentelemetry.io/collector/receiver v0.106.1 - go.opentelemetry.io/collector/receiver/otlpreceiver v0.106.1 + github.com/docker/docker v27.3.1+incompatible + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.115.0 + github.com/stretchr/testify v1.10.0 + go.opentelemetry.io/collector/component/componenttest v0.115.0 + go.opentelemetry.io/collector/consumer/consumertest v0.115.0 + go.opentelemetry.io/collector/pdata v1.21.0 + go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0 + go.opentelemetry.io/collector/receiver/receivertest v0.115.0 gopkg.in/yaml.v3 v3.0.1 - helm.sh/helm/v3 v3.15.3 - k8s.io/api v0.30.3 - k8s.io/apiextensions-apiserver v0.30.1 - k8s.io/apimachinery v0.31.0 - k8s.io/client-go v0.30.3 + helm.sh/helm/v3 v3.16.3 + k8s.io/api v0.31.3 + k8s.io/apiextensions-apiserver v0.31.3 + k8s.io/apimachinery v0.31.3 + k8s.io/client-go v0.31.3 ) require ( + dario.cat/mergo v1.0.1 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/BurntSushi/toml v1.3.2 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.2.1 // indirect - github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Masterminds/semver/v3 v3.3.0 // indirect + github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Masterminds/squirrel v1.5.4 // indirect - 
github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/Microsoft/hcsshim v0.11.4 // indirect - github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect - github.com/containerd/containerd v1.7.12 // indirect + github.com/containerd/containerd v1.7.23 // indirect + github.com/containerd/errdefs v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect - github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/cyphar/filepath-securejoin v0.3.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/distribution/reference v0.5.0 // indirect + github.com/distribution/reference v0.6.0 // indirect github.com/docker/cli v25.0.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect @@ -55,11 +58,11 @@ require ( github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v5.7.0+incompatible // indirect + github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/color v1.13.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-gorp/gorp/v3 
v3.1.0 // indirect @@ -68,7 +71,7 @@ require ( github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.4 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect @@ -87,15 +90,15 @@ require ( github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect - github.com/huandu/xstrings v1.4.0 // indirect - github.com/imdario/mergo v0.3.13 // indirect + github.com/huandu/xstrings v1.5.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jmoiron/sqlx v1.3.5 // indirect + github.com/jmoiron/sqlx v1.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/knadh/koanf v1.5.0 // indirect - github.com/knadh/koanf/v2 v2.1.1 // indirect + github.com/knadh/koanf/v2 v2.1.2 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/lib/pq v1.10.9 // indirect @@ -114,99 +117,104 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect - github.com/morikuni/aec v1.0.0 // indirect github.com/mostynb/go-grpc-compression v1.2.3 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - 
github.com/onsi/gomega v1.33.1 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/extension/ackextension v0.106.1 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.106.1 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.106.1 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.106.1 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.106.1 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.106.1 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/extension/ackextension v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.115.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc6 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.19.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/rs/cors v1.11.0 // 
indirect - github.com/rubenv/sql-migrate v1.5.2 // indirect + github.com/rs/cors v1.11.1 // indirect + github.com/rubenv/sql-migrate v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/shopspring/decimal v1.3.1 // indirect + github.com/shopspring/decimal v1.4.0 // indirect github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/cobra v1.8.0 // indirect + github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.opentelemetry.io/collector v0.106.1 // indirect - go.opentelemetry.io/collector/client v0.106.1 // indirect - go.opentelemetry.io/collector/config/configauth v0.106.1 // indirect - go.opentelemetry.io/collector/config/configcompression v1.12.0 // indirect - go.opentelemetry.io/collector/config/configgrpc v0.106.1 // indirect - go.opentelemetry.io/collector/config/confighttp v0.106.1 // indirect - go.opentelemetry.io/collector/config/confignet v0.106.1 // indirect - go.opentelemetry.io/collector/config/configopaque v1.12.0 // indirect - go.opentelemetry.io/collector/config/configretry v1.12.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.106.1 // indirect - go.opentelemetry.io/collector/config/configtls v1.12.0 // indirect - go.opentelemetry.io/collector/config/internal v0.106.1 // indirect - go.opentelemetry.io/collector/confmap v0.106.1 // indirect - go.opentelemetry.io/collector/consumer v0.106.1 // indirect - go.opentelemetry.io/collector/consumer/consumerprofiles v0.106.1 // indirect - 
go.opentelemetry.io/collector/exporter v0.106.1 // indirect - go.opentelemetry.io/collector/extension v0.106.1 // indirect - go.opentelemetry.io/collector/extension/auth v0.106.1 // indirect - go.opentelemetry.io/collector/featuregate v1.12.0 // indirect - go.opentelemetry.io/collector/internal/globalgates v0.106.1 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.106.1 // indirect - go.opentelemetry.io/collector/semconv v0.106.1 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.50.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/collector v0.115.0 // indirect + go.opentelemetry.io/collector/client v1.21.0 // indirect + go.opentelemetry.io/collector/component v0.115.0 // indirect + go.opentelemetry.io/collector/component/componentstatus v0.115.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.115.0 // indirect + go.opentelemetry.io/collector/config/configcompression v1.21.0 // indirect + go.opentelemetry.io/collector/config/configgrpc v0.115.0 // indirect + go.opentelemetry.io/collector/config/confighttp v0.115.0 // indirect + go.opentelemetry.io/collector/config/confignet v1.21.0 // indirect + go.opentelemetry.io/collector/config/configopaque v1.21.0 // indirect + go.opentelemetry.io/collector/config/configretry v1.21.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.115.0 // indirect + go.opentelemetry.io/collector/config/configtls v1.21.0 // indirect + go.opentelemetry.io/collector/config/internal v0.115.0 // indirect + go.opentelemetry.io/collector/confmap v1.21.0 // 
indirect + go.opentelemetry.io/collector/consumer v1.21.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.115.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 // indirect + go.opentelemetry.io/collector/exporter v0.115.0 // indirect + go.opentelemetry.io/collector/extension v0.115.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.115.0 // indirect + go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 // indirect + go.opentelemetry.io/collector/featuregate v1.21.0 // indirect + go.opentelemetry.io/collector/internal/sharedcomponent v0.115.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.115.0 // indirect + go.opentelemetry.io/collector/pipeline v0.115.0 // indirect + go.opentelemetry.io/collector/receiver v0.115.0 // indirect + go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 // indirect + go.opentelemetry.io/collector/semconv v0.115.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect + go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect + go.opentelemetry.io/otel/sdk v1.32.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect + go.opentelemetry.io/otel/trace v1.32.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.25.0 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/term v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/oauth2 v0.22.0 // 
indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.35.2 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/apiserver v0.30.1 // indirect - k8s.io/cli-runtime v0.30.0 // indirect - k8s.io/component-base v0.30.1 // indirect + k8s.io/apiserver v0.31.3 // indirect + k8s.io/cli-runtime v0.31.1 // indirect + k8s.io/component-base v0.31.3 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect - k8s.io/kubectl v0.30.0 // indirect + k8s.io/kubectl v0.31.1 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect oras.land/oras-go v1.2.5 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect - sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/kustomize/api v0.17.2 // indirect + sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/functional_tests/go.sum b/functional_tests/go.sum index f71ee000e..26378d99a 100644 --- a/functional_tests/go.sum +++ b/functional_tests/go.sum @@ -540,7 +540,11 @@ cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoIS cloud.google.com/go/workflows 
v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= @@ -558,17 +562,16 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod 
h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= -github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Microsoft/hcsshim v0.11.7 h1:vl/nj3Bar/CvJSYo7gIQPyRWc9f3c6IeSNavBTSZNZQ= +github.com/Microsoft/hcsshim v0.11.7/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= @@ -592,8 +595,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= 
-github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= @@ -609,6 +612,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= @@ -650,34 +655,38 @@ github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0= -github.com/containerd/containerd 
v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk= +github.com/containerd/containerd v1.7.23 h1:H2CClyUkmpKAGlhQp95g2WXHfLYc7whAuvZGBNYOOwQ= +github.com/containerd/containerd v1.7.23/go.mod h1:7QUzfURqZWCZV7RLNEn1XjUCQLEf0bkaK4GjUaZehxw= github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= +github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8= +github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM= 
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= -github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= -github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/cli v25.0.1+incompatible h1:mFpqnrS6Hsm3v1k7Wa/BO23oz0k121MTbTO1lpcGSkU= github.com/docker/cli v25.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= -github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod 
h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ -692,6 +701,10 @@ github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arX github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/ebitengine/purego v0.8.1 h1:sdRKd6plj7KYW33EH5As6YKfe8m9zbN9JMrOjNVF/BE= +github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/elastic/lunes v0.1.0 h1:amRtLPjwkWtzDF/RKzcEPMvSsSseLDLW+bnhfNSLRe4= +github.com/elastic/lunes v0.1.0/go.mod h1:xGphYIt3XdZRtyWosHQTErsQTd4OP1p9wsbVoHelrd4= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -709,8 +722,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= -github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= -github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -722,13 +735,13 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= -github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 
h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -767,21 +780,14 @@ github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogB github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= -github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= -github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-viper/mapstructure/v2 v2.0.0 h1:dhn8MZ1gZ0mzeodTG3jt5Vj/o87xZKuNAprG2mQfMfc= -github.com/go-viper/mapstructure/v2 v2.0.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= -github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= -github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= -github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= 
-github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY= -github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= @@ -858,7 +864,6 @@ github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkj github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -944,19 +949,17 @@ github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvh github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= -github.com/huandu/xstrings 
v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= -github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -971,20 +974,18 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.0/go.mod 
h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= -github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= -github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= -github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= +github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ= +github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -1003,7 +1004,6 @@ github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= @@ -1013,14 +1013,10 @@ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2 github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= +github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= -github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= -github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= -github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= 
-github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -1038,14 +1034,14 @@ github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPn github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= -github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= 
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -1073,6 +1069,8 @@ github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8 github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1100,40 +1098,40 @@ github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.106.1 h1:uWo5xrc05cDsE0iPFZUYhDDWi6oSZQlFF4rhGTg6UbY= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.106.1/go.mod h1:oZ/2WhKaXLEnPfr1qcHrQZAJn708DcHw089DpeqKaNw= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter v0.106.1 h1:hrwkaTXHhJ/H51OP888iOHGJpdhcIoYYd2hrw3TmIhI= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter v0.106.1/go.mod h1:5s9sXSvuOimvLlC6aaUqdT8ujflfBhY05mOv5U/RtLU= 
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/ackextension v0.106.1 h1:Dko1Nhhv0QFuwen6l/8SfyCJZOx2SVLQ0kQCsGO4OTc= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/ackextension v0.106.1/go.mod h1:gSEgRqhbIiaz9j8y2KNHVaZWWkk7jSADMWH/qUWthUo= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.106.1 h1:mrVp9OyF85td0DcOuA66r/iGbRPSqjZNXuxFG+OcGvw= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.106.1/go.mod h1:I+fL3494NrarNgHHs2D7kz0v02KGCmwmp3L89KUqENA= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.106.1 h1:5w2vfMlgpt0nf0Z/F18dSv3achW5cTSdNaaNfsS0gVI= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.106.1/go.mod h1:gS1bYFpF6fHb12/5JNjY0liAjStZ91ts8wHZNw1hkz0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.106.1 h1:ekqB6RaI48pavLEe39JGMfBm0id+FKnxJ3Dtq8OYXdE= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.106.1/go.mod h1:ZdsFe3ZBK/r4ur9rTYkHD1A4CAuKs2I3ytJIqtG8dq4= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.106.1 h1:tpjY48nO/wruPyMC+FKstk+KZhc9lS8kWOyuXMU1M2Y= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.106.1/go.mod h1:UDNMNbgnl1I28lOI6169Tm/RvOURtoKk2jd3bcCxqJo= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.106.1 h1:y9Q4QQLQCrYPqVBKZMIaOIAACA9f+enKHlp/cFfkzRI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.106.1/go.mod h1:72ydW2UqosBrgIh2LLaB2nyrANoNwVtDux9eQ6NNW7o= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.106.1 h1:YhLcOtqVzYEXm8ZiNAh6x/5VG0thwFn+ipOY8IOzWIc= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.106.1/go.mod h1:9fXA6J9q3FGV1SZYi1QLHbYXK+iTUKe1e+/uMKxB83c= 
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.106.1 h1:+Yxoktjmfged3ChJuAwvw8A6CqTklqF/vR7rfntsvrs= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.106.1/go.mod h1:6MVXAX6OpG01Gb38KJUP/8APe2BCmGYtzKPOua05bTw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.106.2-0.20240808194002-5374bbac3595 h1:UU5ov0g2XYS56D5b3eF15VAG2FToBRSkxRm3rLcxdAE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.106.2-0.20240808194002-5374bbac3595/go.mod h1:St0VVFKzA0fNxo5RmzI4fg7ucGttd840OZ56a+ZECZs= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.106.1 h1:okbtl90cPTT9qfpDOkNrPaPyEpFBvNxxa7vv74XRA1s= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.106.1/go.mod h1:ehzaiDdkrww7l1Stvse5GCOAsAZOpFcgeIbB/2PqFs4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.106.1 h1:yrHDzF/q52vPKs9j/GPIiBMKGIIKVpJAVrQyuPG+a4s= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.106.1/go.mod h1:Ys6NbuOJARIZYb2eY/NrG1XulnR3ke6i78Jx6fW9mXg= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.106.1 h1:xwzRBbBvAK6bz9NYyRNzb5sMfGYPkI+wX+iBT4ghP1I= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.106.1/go.mod h1:H1gTtgVBPEutIq9I0ymXgNDZyKeDndz0wSDVIsefqAY= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.106.1 h1:AmThzwLE033o6meePGKdfw4A0+W48otNovXoc2tTDpA= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.106.1/go.mod h1:5uU5nvMrywTcKdeaPqhFikzOP6LUnBxnQMoSGIBBjHs= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.115.0 h1:OKZ7621PLOPS60NR793joAlPp6OVyrR0bQiBOymmFYM= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter v0.115.0/go.mod 
h1:19AqiOGBL0OqT9Y36cvTsq3pynqPX3Eber86jJvDv8c= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter v0.115.0 h1:QpEV5poZP1QGnhyLg8fcvvYuq9aDkwzv9A/Nv2KlXes= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter v0.115.0/go.mod h1:2X7h1Wf+TOtiaDR5SnE2a0mBTtDoWd2oM03Zodh+kSI= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/ackextension v0.115.0 h1:un39NY5vpr2th6fJeAeKRnNBxsc3UqV3RiMDuE/vSuc= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/ackextension v0.115.0/go.mod h1:LS2OD4qu4tfBS2sU8BN/Rxb944z3fDmDtSw1tDcymxA= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.115.0 h1:vRQQFD4YpasQFUAdF030UWtaflSYFXK542bfWMGhOK0= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.115.0/go.mod h1:BZ7DT+0VkKR7P3I9PGEDfVa0GdB0ty41eEcejIUXF9A= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0 h1:a36EJz/mb83f6ieX0v4fNDJ1jXqpeaM6DVQXeFDvdhw= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0/go.mod h1:r5/40YO1eSP5ZreOmRzVOUtDr7YG39ZIUcVjHd+9Izc= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.115.0 h1:MuyDWyVoCty8HyP2CAYoRZXwINiThHovcC1Bj3+H8lk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.115.0/go.mod h1:asekVnrdzYsMJBaJtIyXOt8p07l1x0xs8X3h00sZyf0= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.115.0 h1:6GIJOSEIWBt9bprARMtTjRlENrwNsJl2UzbtjOBk7A0= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.115.0/go.mod h1:/Fg/itwlAzDjyM0Sjenup9TbdOT+aVNPSqXsF80M8hw= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.115.0 h1:l4NBxl2AELPlyqupLu1IVAjtbGOEovaKEyt0UGMsuq8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.115.0/go.mod 
h1:j1qF1hE/Qcy2I655yXbf2ItezXok61OW+9AAxbH2ORw= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.115.0 h1:l9AsnVHr3Sp4lAGFlBJ6Ochl7mlPE0d5MNd70o4qKEM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.115.0/go.mod h1:kARk81QZpcX6L8x4fLo4Nr/z/+jpo5PxXtugBxF2DyE= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.115.0 h1:Z9p78zj9Qblw472mGkPieuX7mqduAp47rzMbFfq5evI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.115.0/go.mod h1:mtxUxJEIQy27MaGR1yzcn/OK8NoddEgb7fumpEbKYss= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0 h1:MerLKMrkM4YoGF6Di0D9yMXO02yCX8mrZAi/+jJVVeI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0/go.mod h1:R8AkVWe9G5Q0oMOapvm9HNS076E3Min8SVlmhBL3QD0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0 h1:WEqcnWSy9dNSlGb8pYRBX7zhaz2ReyaeImlenbzNTB4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0/go.mod h1:6Mk71CakHUA3I6oM9hARDiyQypYyOolvb+4PFYyVEFg= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.115.0 h1:y+Ystow5wnNQg/5yxx2AnWwtdUrKBTtdzTS3M+d1o8U= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx v0.115.0/go.mod h1:UNQYkrGC0fQYoGCU/9ReJwWtTUCg3b37qrQj8BCWKOE= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.115.0 h1:McHRAaPXx8FN6gXnRPnNaDSPAsPc/WRCvA0HhL15iLM= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.115.0/go.mod h1:FDCqM9AnvMJX2Gu0O9ARrg+nhqHIVgJZzQhWTF0xASw= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.115.0 h1:3BJLq6lX2P+RoJ0vSW7sAeGXFKbUw3m57EQ+g+mYnzQ= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkhecreceiver v0.115.0/go.mod 
h1:2SetZwljaevaRr6Ek1zvzQaFkTxcNYcMcakLdtU/sTI= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58yxX1Ov9HERHNqU= -github.com/opencontainers/image-spec v1.1.0-rc6/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= @@ -1146,6 +1144,8 @@ github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1196,10 +1196,10 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal 
v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= -github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rubenv/sql-migrate v1.5.2 h1:bMDqOnrJVV/6JQgQ/MxOpU+AdO8uzYYA/TxFUBzFtS0= -github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rubenv/sql-migrate v1.7.0 h1:HtQq1xyTN2ISmQDggnh0c9U3JlP8apWh8YO2jzlXpTI= +github.com/rubenv/sql-migrate v1.7.0/go.mod h1:S4wtDEG1CKn+0ShpTtzWhFpHHI5PvCUtiGI+C+Z2THE= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= @@ -1208,15 +1208,12 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shirou/gopsutil/v4 v4.24.6 h1:9qqCSYF2pgOU+t+NgJtp7Co5+5mHF/HyKBUckySQL64= -github.com/shirou/gopsutil/v4 v4.24.6/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= 
-github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shirou/gopsutil/v4 v4.24.10 h1:7VOzPtfw/5YDU+jLEoBwXwxJbQetULywoSV4RYY7HkM= +github.com/shirou/gopsutil/v4 v4.24.10/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3 h1:32k2QLgsKhcEs55q4REPKyIadvid5FPy2+VMgvbmKJ0= github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3/go.mod h1:gJrXWi7wSGXfiC7+VheQaz+ypdCt5SmZNL+BRxUe7y4= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1228,11 +1225,10 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cast 
v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1250,8 +1246,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= @@ -1290,100 +1286,102 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.106.1 h1:ZSQMpFGzFP3RILe1/+K80kCCT2ahn3MKt5e3u0Yz7Rs= 
-go.opentelemetry.io/collector v0.106.1/go.mod h1:1FabMxWLluLNcC0dq8cI01GaE6t6fYxE6Oxuf8u7AGQ= -go.opentelemetry.io/collector/client v0.106.1 h1:aBasAp+t7F30lI+oQpT95ZgYMiNaUlYRlgyeEvEGwjk= -go.opentelemetry.io/collector/client v0.106.1/go.mod h1:QEmOGAu/8vNn2lhwcLVI3iEUIoQlXNGWsdCfENN5qDc= -go.opentelemetry.io/collector/component v0.106.1 h1:6Xp4tKqnd/JkJDG/C4p1hto+Y5zvk5FwqZIdMCPzZlA= -go.opentelemetry.io/collector/component v0.106.1/go.mod h1:KiVE/5ZayuLlDJTe7mHqHRCn/5LrmF99C7/mKe54mWA= -go.opentelemetry.io/collector/config/configauth v0.106.1 h1:ANwKV2vzJoAcif/T23s5AIlDt8kTa8bUMcSN6fYAruQ= -go.opentelemetry.io/collector/config/configauth v0.106.1/go.mod h1:nBTtlQ2KoMnUEp1PXa6hMCwcJpJ59poUdKyDq1fO/R4= -go.opentelemetry.io/collector/config/configcompression v1.12.0 h1:RxqSDVZPJyL7I3v+gdVDvnJ/9tV0ZWgraRDX/gaddfA= -go.opentelemetry.io/collector/config/configcompression v1.12.0/go.mod h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0= -go.opentelemetry.io/collector/config/configgrpc v0.106.1 h1:J0w1o/1vvJE2MtQAs4j+iq8ZMVZqduhoiu30hlsZeDo= -go.opentelemetry.io/collector/config/configgrpc v0.106.1/go.mod h1:aKGBNu/A4b3MEUUi1nNEuTHHCm/FNw9t56oUvQcFXi4= -go.opentelemetry.io/collector/config/confighttp v0.106.1 h1:OBX21JpJOVRASDnkwor5JpNz85qGvWYjtCIFO+5cUb8= -go.opentelemetry.io/collector/config/confighttp v0.106.1/go.mod h1:kafsg9XCdSHbgwOJO96Xxv3VolmjUN8KOozrjZzrun4= -go.opentelemetry.io/collector/config/confignet v0.106.1 h1:h/Rgqp5+1uKKJgsZuuLeO6zmJhdGi5PJGE2BdrM0eis= -go.opentelemetry.io/collector/config/confignet v0.106.1/go.mod h1:pfOrCTfSZEB6H2rKtx41/3RN4dKs+X2EKQbw3MGRh0E= -go.opentelemetry.io/collector/config/configopaque v1.12.0 h1:aIsp9NdcLZSiG4YDoFPGXhmma03Tk+6e89+n8GtU/Mc= -go.opentelemetry.io/collector/config/configopaque v1.12.0/go.mod h1:0xURn2sOy5j4fbaocpEYfM97HPGsiffkkVudSPyTJlM= -go.opentelemetry.io/collector/config/configretry v1.12.0 h1:tEBwueO4AIkwWosxz6NWqnghdZ7y5SfHcIzLrvh6kB8= -go.opentelemetry.io/collector/config/configretry v1.12.0/go.mod 
h1:P+RA0IA+QoxnDn4072uyeAk1RIoYiCbxYsjpKX5eFC4= -go.opentelemetry.io/collector/config/configtelemetry v0.106.1 h1:A8nwYnI6brfur5KPFC8GtVX/49pByvVoKSgO4qPXBqg= -go.opentelemetry.io/collector/config/configtelemetry v0.106.1/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40= -go.opentelemetry.io/collector/config/configtls v1.12.0 h1:Px0+GE4LE/9sXMgkwBb5g8QHWvnrnuRg9BLSa+QtxgM= -go.opentelemetry.io/collector/config/configtls v1.12.0/go.mod h1:aeCGPlvrWhc+EySpIKdelPAj4l9wXKzZPouQO3NIoTs= -go.opentelemetry.io/collector/config/internal v0.106.1 h1:cEENs6xIMi4wjGF18JZX5uTV70mHPGfHKg9XndOubBs= -go.opentelemetry.io/collector/config/internal v0.106.1/go.mod h1:cmhchMcN0o/upXM7bxGv5djhhSW8kAYjKJoqT+AOxy0= -go.opentelemetry.io/collector/confmap v0.106.1 h1:R7HQIPDRPOEwauBeJUlkT8Elc5f0KQr/s/kQfZi05t0= -go.opentelemetry.io/collector/confmap v0.106.1/go.mod h1:iWdWgvxRYSHdAt5ySgPJq/i6fQMKGNnP5Pt7jOfTXno= -go.opentelemetry.io/collector/consumer v0.106.1 h1:+AQ/Kmoc/g0WP8thwymNkXk1jeWsHDK6XyYfdezcxcc= -go.opentelemetry.io/collector/consumer v0.106.1/go.mod h1:oy6pR/v5o/N9cxsICskyt//bU8k8EG0JeOO1MTDfs5A= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.106.1 h1:uxQjWm2XE7d1OncQDM9tL1ha+otGt1HjoRYIcQRMOfQ= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.106.1/go.mod h1:xQScBf9/PORFaYM6JVPOr7/TcRVEuKcW5XbAXfJByRs= -go.opentelemetry.io/collector/consumer/consumertest v0.106.1 h1:hDdFeVjCLIJ6iLfbiYcV9s+4iboFXbkJ/k3h09qusPw= -go.opentelemetry.io/collector/consumer/consumertest v0.106.1/go.mod h1:WRTYnQ8bYHQrEN6eJZ80oC4pNI7VeDRdsTZI6xs9o5M= -go.opentelemetry.io/collector/exporter v0.106.1 h1:PIqLqWacxSFcCstiF/6kxI1GLiP+9pjG7XFujxZG1z0= -go.opentelemetry.io/collector/exporter v0.106.1/go.mod h1:aGIoEwT5UjCHzq+oFYMuc5Mt15AfqvgDeLdjWTYSqNo= -go.opentelemetry.io/collector/extension v0.106.1 h1:HNV2eOxaSDy5gL5KYoPoTLlp6fdeWmuw4paxhi10VUo= -go.opentelemetry.io/collector/extension v0.106.1/go.mod h1:DCOGxD+WnIJiDveKWlvH5WwB9AgqlRUlnKgwhNCLrtQ= 
-go.opentelemetry.io/collector/extension/auth v0.106.1 h1:IJyY5zeC11H/jlEGVx2bfTW0oVui2k1AjV8DnC6tYhE= -go.opentelemetry.io/collector/extension/auth v0.106.1/go.mod h1:4VcPLz6QTNq3fbY6kH5tvTnF6tWtz4toK2LC1ydpnts= -go.opentelemetry.io/collector/featuregate v1.12.0 h1:l5WbV2vMQd2bL8ubfGrbKNtZaeJRckE12CTHvRe47Tw= -go.opentelemetry.io/collector/featuregate v1.12.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U= -go.opentelemetry.io/collector/internal/globalgates v0.106.1 h1:0NQHTcykmYNDsNKObJ2XocGCv3WUAQZppfP3o6hZUIA= -go.opentelemetry.io/collector/internal/globalgates v0.106.1/go.mod h1:Z5US6O2xkZAtxVSSBnHAPFZwPhFoxlyKLUvS67Vx4gc= -go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA= -go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI= -go.opentelemetry.io/collector/pdata/pprofile v0.106.1 h1:nOLo25YnluNi+zAbU7G24RN86cJ1/EZJc6VEayBlOPo= -go.opentelemetry.io/collector/pdata/pprofile v0.106.1/go.mod h1:chr7lMJIzyXkccnPRkIPhyXtqLZLSReZYhwsggOGEfg= -go.opentelemetry.io/collector/pdata/testdata v0.106.1 h1:JUyLAwKD8o/9jgkBi16zOClxOyY028A7XIXHPV4mNmM= -go.opentelemetry.io/collector/pdata/testdata v0.106.1/go.mod h1:ghdz2RDEzsfigW0J+9oqA4fGmQJ/DJYUhE3vYU6JfhM= -go.opentelemetry.io/collector/receiver v0.106.1 h1:9kDLDJmInnz+AzAV9oV/UGMoc1+oI1pwMMs7+uMiJq4= -go.opentelemetry.io/collector/receiver v0.106.1/go.mod h1:3j9asWz7mqsgE77rPaNhlNQhRwgFhRynf0UEPs/4rkM= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.106.1 h1:Z4wSuYpTpnrgUHq6XnG8eHNCXzSsF+yDODzZdVV2GmU= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.106.1/go.mod h1:IE8RQoGF84xPW0UzRkyq6eT8mVbSu5vuXGRrKjK7iaM= -go.opentelemetry.io/collector/semconv v0.106.1 h1:x0OSXrQCFinqZNUPTKrHU0gnbwngOVOPyhedQCDyDoQ= -go.opentelemetry.io/collector/semconv v0.106.1/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/contrib/config v0.8.0 h1:OD7aDMhL+2EpzdSHfkDmcdD/uUA+PgKM5faFyF9XFT0= 
-go.opentelemetry.io/contrib/config v0.8.0/go.mod h1:dGeVZWE//3wrxYHHP0iCBYJU1QmOmPcbV+FNB7pjDYI= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.4.0 h1:zBPZAISA9NOc5cE8zydqDiS0itvg/P/0Hn9m72a5gvM= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.4.0/go.mod h1:gcj2fFjEsqpV3fXuzAA+0Ze1p2/4MJ4T7d77AmkvueQ= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 h1:U2guen0GhqH8o/G2un8f/aG/y++OuW6MyCo6hT9prXk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0/go.mod h1:yeGZANgEcpdx/WK0IvvRFC+2oLiMS2u4L/0Rj2M2Qr0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 h1:aLmmtjRke7LPDQ3lvpFz+kNEH43faFhzW7v8BFIEydg= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0/go.mod h1:TC1pyCt6G9Sjb4bQpShH+P5R53pO6ZuGnHuuln9xMeE= +go.opentelemetry.io/collector v0.115.0 h1:qUZ0bTeNBudMxNQ7FJKS//TxTjeJ7tfU/z22mcFavWU= +go.opentelemetry.io/collector v0.115.0/go.mod h1:66qx0xKnVvdwq60e1DEfb4e+zmM9szhPsv2hxZ/Mpj4= +go.opentelemetry.io/collector/client v1.21.0 h1:3Kes8lOFMYVxoxeAmX+DTEAkuS1iTA3NkSfqzGmygJA= +go.opentelemetry.io/collector/client v1.21.0/go.mod h1:jYJGiL0UA975OOyHmjbQSokNWt1OiviI5KjPOMUMGwc= +go.opentelemetry.io/collector/component v0.115.0 
h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= +go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= +go.opentelemetry.io/collector/component/componentstatus v0.115.0 h1:pbpUIL+uKDfEiSgKK+S5nuSL6MDIIQYsp4b65ZGVb9M= +go.opentelemetry.io/collector/component/componentstatus v0.115.0/go.mod h1:36A+9XSiOz0Cdhq+UwwPRlEr5CYuSkEnVO9om4BH7d0= +go.opentelemetry.io/collector/component/componenttest v0.115.0 h1:9URDJ9VyP6tuij+YHjp/kSSMecnZOd7oGvzu+rw9SJY= +go.opentelemetry.io/collector/component/componenttest v0.115.0/go.mod h1:PzXvNqKLCiSADZGZFKH+IOHMkaQ0GTHuzysfVbTPKYY= +go.opentelemetry.io/collector/config/configauth v0.115.0 h1:xa+ALdyPgva3rZnLBh1H2oS5MsHP6JxSqMtQmcELnys= +go.opentelemetry.io/collector/config/configauth v0.115.0/go.mod h1:C7anpb3Rf4KswMT+dgOzkW9UX0z/65PLORpUw3p0VYc= +go.opentelemetry.io/collector/config/configcompression v1.21.0 h1:0zbPdZAgPFMAarwJEC4gaR6f/JBP686A3TYSgb3oa+E= +go.opentelemetry.io/collector/config/configcompression v1.21.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= +go.opentelemetry.io/collector/config/configgrpc v0.115.0 h1:gZzXSFe6hB3RUcEeAYqk1yT+TBa+X9tp6/1x29Yg2yk= +go.opentelemetry.io/collector/config/configgrpc v0.115.0/go.mod h1:107lRZ5LdQPMdGJGd4m1GhyKxyH0az2cUOqrJgTEN8E= +go.opentelemetry.io/collector/config/confighttp v0.115.0 h1:BIy394oNXnqySJwrCqgAJu4gWgAV5aQUDD6k1hy6C8o= +go.opentelemetry.io/collector/config/confighttp v0.115.0/go.mod h1:Wr50ut12NmCEAl4bWLJryw2EjUmJTtYRg89560Q51wc= +go.opentelemetry.io/collector/config/confignet v1.21.0 h1:PeQ5YrMnfftysFL/WVaSrjPOWjD6DfeABY50pf9CZxU= +go.opentelemetry.io/collector/config/confignet v1.21.0/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= +go.opentelemetry.io/collector/config/configopaque v1.21.0 h1:PcvRGkBk4Px8BQM7tX+kw4i3jBsfAHGoGQbtZg6Ox7U= +go.opentelemetry.io/collector/config/configopaque v1.21.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= +go.opentelemetry.io/collector/config/configretry 
v1.21.0 h1:ZHoOvAkEcv5BBeaJn8IQ6rQ4GMPZWW4S+W7R4QTEbZU= +go.opentelemetry.io/collector/config/configretry v1.21.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= +go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= +go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/config/configtls v1.21.0 h1:ZfrlAYgBD8lzp04W0GxwiDmUbrvKsvDYJi+wkyiXlpA= +go.opentelemetry.io/collector/config/configtls v1.21.0/go.mod h1:5EsNefPfVCMOTlOrr3wyj7LrsOgY7V8iqRl8oFZEqtw= +go.opentelemetry.io/collector/config/internal v0.115.0 h1:eVk57iufZpUXyPJFKTb1Ebx5tmcCyroIlt427r5pxS8= +go.opentelemetry.io/collector/config/internal v0.115.0/go.mod h1:OVkadRWlKAoWjHslqjWtBLAne8ceQm8WYT71ZcBWLFc= +go.opentelemetry.io/collector/confmap v1.21.0 h1:1tIcx2/Suwg8VhuPmQw87ba0ludPmumpFCFRZZa6RXA= +go.opentelemetry.io/collector/confmap v1.21.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/consumer v1.21.0 h1:THKZ2Vbi6GkamjTBI2hFq5Dc4kINZTWGwQNa8d/Ty9g= +go.opentelemetry.io/collector/consumer v1.21.0/go.mod h1:FQcC4ThMtRYY41dv+IPNK8POLLhAFY3r1YR5fuP7iiY= +go.opentelemetry.io/collector/consumer/consumererror v0.115.0 h1:yli//xBCQMPZKXNgNlXemo4dvqhnFrAmCZ11DvQgmcY= +go.opentelemetry.io/collector/consumer/consumererror v0.115.0/go.mod h1:LwVzAvQ6ZVNG7mbOvurbAo+W/rKws0IcjOwriuZXqPE= +go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 h1:H3fDuyQW1t2HWHkz96WMBQJKUevypOCjBqnqtaAWyoA= +go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0/go.mod h1:IzEmZ91Tp7TBxVDq8Cc9xvLsmO7H08njr6Pu9P5d9ns= +go.opentelemetry.io/collector/consumer/consumertest v0.115.0 h1:hru0I2447y0TluCdwlKYFFtgcpyCnlM+LiOK1JZyA70= +go.opentelemetry.io/collector/consumer/consumertest v0.115.0/go.mod h1:ybjALRJWR6aKNOzEMy1T1ruCULVDEjj4omtOJMrH/kU= +go.opentelemetry.io/collector/exporter v0.115.0 
h1:JnxfpOnsuqhTPKJXVKJLS1Cv3BiVrVLzpHOjJEQw+xw= +go.opentelemetry.io/collector/exporter v0.115.0/go.mod h1:xof3fHQK8wADhaKLIJcQ7ChZaFLNC+haRdPN0wgl6kY= +go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 h1:lSQEleCn/q9eFufcuK61NdFKU70ZlgI9dBjPCO/4CrE= +go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0/go.mod h1:7l5K2AecimX2kx+nZC1gKG3QkP247CO1+SodmJ4fFkQ= +go.opentelemetry.io/collector/exporter/exportertest v0.115.0 h1:P9SMTUXQOtcaq40bGtnnAe14zRmR4/yUgj/Tb2BEf/k= +go.opentelemetry.io/collector/exporter/exportertest v0.115.0/go.mod h1:1jMZ9gFGXglb8wfNrBZIgd+RvpZhSyFwdfE+Jtf9w4U= +go.opentelemetry.io/collector/extension v0.115.0 h1:/cBb8AUdD0KMWC6V3lvCC16eP9Fg0wd1Upcp5rgvuGI= +go.opentelemetry.io/collector/extension v0.115.0/go.mod h1:HI7Ak6loyi6ZrZPsQJW1OO1wbaAW8OqXLFNQlTZnreQ= +go.opentelemetry.io/collector/extension/auth v0.115.0 h1:TTMokbBsSHZRFH48PvGSJmgSS8F3Rkr9MWGHZn8eJDk= +go.opentelemetry.io/collector/extension/auth v0.115.0/go.mod h1:3w+2mzeb2OYNOO4Bi41TUo4jr32ap2y7AOq64IDpxQo= +go.opentelemetry.io/collector/extension/auth/authtest v0.115.0 h1:OZe7dKbZ01qodSpZU0ZYzI6zpmmzJ3UvfdBSFAbSgDw= +go.opentelemetry.io/collector/extension/auth/authtest v0.115.0/go.mod h1:fk9WCXP0x91Q64Z8HZKWTHh9PWtgoWE1KXe3n2Bff3U= +go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 h1:sZXw0+77092pq24CkUoTRoHQPLQUsDq6HFRNB0g5yR4= +go.opentelemetry.io/collector/extension/experimental/storage v0.115.0/go.mod h1:qjFH7Y3QYYs88By2ZB5GMSUN5k3ul4Brrq2J6lKACA0= +go.opentelemetry.io/collector/extension/extensiontest v0.115.0 h1:GBVFxFEskR8jSdu9uaQh2qpXnN5VNXhXjpJ2UjxtE8I= +go.opentelemetry.io/collector/extension/extensiontest v0.115.0/go.mod h1:eu1ecbz5mT+cHoH2H3GmD/rOO0WsicSJD2RLrYuOmRA= +go.opentelemetry.io/collector/featuregate v1.21.0 h1:+EULHPJDLMipcwAGZVp9Nm8NriRvoBBMxp7MSiIZVMI= +go.opentelemetry.io/collector/featuregate v1.21.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= +go.opentelemetry.io/collector/internal/sharedcomponent 
v0.115.0 h1:9TL6T6ALqDpumUJ0tYIuPIg5LGo4r6eoqlNArYX116o= +go.opentelemetry.io/collector/internal/sharedcomponent v0.115.0/go.mod h1:SgBLKMh11bOTPR1bdDZbi5MlqsoDBBFI3uBIwnei+0k= +go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= +go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= +go.opentelemetry.io/collector/pdata/pprofile v0.115.0 h1:NI89hy13vNDw7EOnQf7Jtitks4HJFO0SUWznTssmP94= +go.opentelemetry.io/collector/pdata/pprofile v0.115.0/go.mod h1:jGzdNfO0XTtfLjXCL/uCC1livg1LlfR+ix2WE/z3RpQ= +go.opentelemetry.io/collector/pdata/testdata v0.115.0 h1:Rblz+AKXdo3fG626jS+KSd0OSA4uMXcTQfpwed6P8LI= +go.opentelemetry.io/collector/pdata/testdata v0.115.0/go.mod h1:inNnRt6S2Nn260EfCBEcjesjlKOSsr0jPwkPqpBkt4s= +go.opentelemetry.io/collector/pipeline v0.115.0 h1:bmACBqb0e8U9ag+vGGHUP7kCfAO7HHROdtzIEg8ulus= +go.opentelemetry.io/collector/pipeline v0.115.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/receiver v0.115.0 h1:55Q3Jvj6zHCIA1psKqi/3kEMJO4OqUF5tNAEYNdB1U8= +go.opentelemetry.io/collector/receiver v0.115.0/go.mod h1:nBSCh2O/WUcfgpJ+Jpz+B0z0Hn5jHeRvF2WmLij5EIY= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0 h1:NqMWsGuVy6y6VKTaPeJS7NZ9KAxhE/xyGUC7GaLYm/o= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0/go.mod h1:9ituzngnjsh/YvO+Phayq9BTk/nw0rgK5ZVvX1oxULk= +go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 h1:R9JLaj2Al93smIPUkbJshAkb/cY0H5JBOxIx+Zu0NG4= +go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0/go.mod h1:05E5hGujWeeXJmzKZwTdHyZ/+rRyrQlQB5p5Q2XY39M= +go.opentelemetry.io/collector/receiver/receivertest v0.115.0 h1:OiB684SbHQi6/Pd3ZH0cXjYvCpBS9ilQBfTQx0wVXHg= +go.opentelemetry.io/collector/receiver/receivertest v0.115.0/go.mod h1:Y8Z9U/bz9Xpyt8GI8DxZZgryw3mnnIw+AeKVLTD2cP8= +go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= 
+go.opentelemetry.io/collector/semconv v0.115.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= -go.opentelemetry.io/otel/exporters/prometheus v0.50.0 h1:2Ewsda6hejmbhGFyUvWZjUThC98Cf8Zy6g0zkIimOng= -go.opentelemetry.io/otel/exporters/prometheus v0.50.0/go.mod h1:pMm5PkUo5YwbLiuEf7t2xg4wbP0/eSJrMxIMxKosynY= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0 h1:BJee2iLkfRfl9lc7aFmBwkWxY/RI1RDdXepSF6y8TPE= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0/go.mod h1:DIzlHs3DRscCIBU3Y9YSzPfScwnYnzfnCd4g8zA7bZc= 
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= -go.opentelemetry.io/otel/log v0.4.0 h1:/vZ+3Utqh18e8TPjuc3ecg284078KWrR8BRz+PQAj3o= -go.opentelemetry.io/otel/log v0.4.0/go.mod h1:DhGnQvky7pHy82MIRV43iXh3FlKN8UUKftn0KbLOq6I= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/sdk/log v0.4.0 h1:1mMI22L82zLqf6KtkjrRy5BbagOTWdJsqMY/HSqILAA= -go.opentelemetry.io/otel/sdk/log v0.4.0/go.mod h1:AYJ9FVF0hNOgAVzUG/ybg/QttnXhUePWAupmCqtdESo= -go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= -go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= +go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= 
+go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1413,11 +1411,10 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1506,8 +1503,8 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod 
h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1531,8 +1528,8 @@ golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1548,8 +1545,8 @@ golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1627,8 +1624,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1640,8 +1637,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod 
h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -1657,8 +1654,8 @@ golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1837,20 +1834,20 @@ google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod 
h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= -google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= +google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0= -google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1885,8 +1882,8 @@ google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1905,8 +1902,8 @@ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.28.1/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1915,6 +1912,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -1929,37 +1928,36 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -helm.sh/helm/v3 v3.15.3 h1:HcZDaVFe9uHa6hpsR54mJjYyRy4uz/pc6csg27nxFOc= -helm.sh/helm/v3 v3.15.3/go.mod h1:FzSIP8jDQaa6WAVg9F+OkKz7J0ZmAga4MABtTbsb9WQ= +helm.sh/helm/v3 v3.16.3 h1:kb8bSxMeRJ+knsK/ovvlaVPfdis0X3/ZhYCSFRP+YmY= +helm.sh/helm/v3 v3.16.3/go.mod h1:zeVWGDR4JJgiRbT3AnNsjYaX8OTJlIE9zC+Q7F7iUSU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= -k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04= -k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= -k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= -k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= -k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/apiserver v0.30.1 h1:BEWEe8bzS12nMtDKXzCF5Q5ovp6LjjYkSp8qOPk8LZ8= -k8s.io/apiserver v0.30.1/go.mod h1:i87ZnQ+/PGAmSbD/iEKM68bm1D5reX8fO4Ito4B01mo= -k8s.io/cli-runtime v0.30.0 h1:0vn6/XhOvn1RJ2KJOC6IRR2CGqrpT6QQF4+8pYpWQ48= -k8s.io/cli-runtime v0.30.0/go.mod 
h1:vATpDMATVTMA79sZ0YUCzlMelf6rUjoBzlp+RnoM+cg= -k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= -k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= -k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ= -k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI= +k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= +k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= +k8s.io/apiextensions-apiserver v0.31.3 h1:+GFGj2qFiU7rGCsA5o+p/rul1OQIq6oYpQw4+u+nciE= +k8s.io/apiextensions-apiserver v0.31.3/go.mod h1:2DSpFhUZZJmn/cr/RweH1cEVVbzFw9YBu4T+U3mf1e4= +k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= +k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apiserver v0.31.3 h1:+1oHTtCB+OheqFEz375D0IlzHZ5VeQKX1KGXnx+TTuY= +k8s.io/apiserver v0.31.3/go.mod h1:PrxVbebxrxQPFhJk4powDISIROkNMKHibTg9lTRQ0Qg= +k8s.io/cli-runtime v0.31.1 h1:/ZmKhmZ6hNqDM+yf9s3Y4KEYakNXUn5sod2LWGGwCuk= +k8s.io/cli-runtime v0.31.1/go.mod h1:pKv1cDIaq7ehWGuXQ+A//1OIF+7DI+xudXtExMCbe9U= +k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= +k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= +k8s.io/component-base v0.31.3 h1:DMCXXVx546Rfvhj+3cOm2EUxhS+EyztH423j+8sOwhQ= +k8s.io/component-base v0.31.3/go.mod h1:xME6BHfUOafRgT0rGVBGl7TuSg8Z9/deT7qq6w7qjIU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/kubectl v0.30.0 h1:xbPvzagbJ6RNYVMVuiHArC1grrV5vSmmIcSZuCdzRyk= -k8s.io/kubectl v0.30.0/go.mod 
h1:zgolRw2MQXLPwmic2l/+iHs239L49fhSeICuMhQQXTI= +k8s.io/kubectl v0.31.1 h1:ih4JQJHxsEggFqDJEHSOdJ69ZxZftgeZvYo7M/cpp24= +k8s.io/kubectl v0.31.1/go.mod h1:aNuQoR43W6MLAtXQ/Bu4GDmoHlbhHKuyD49lmTC8eJM= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= @@ -2001,10 +1999,10 @@ oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= -sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= -sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= -sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= +sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= +sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0= +sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= +sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git 
a/functional_tests/histogram_test.go b/functional_tests/histogram_test.go index 5f5ebdfe8..fb6e48904 100644 --- a/functional_tests/histogram_test.go +++ b/functional_tests/histogram_test.go @@ -66,7 +66,7 @@ func setupOtlpReceiver(t *testing.T, port int) *consumertest.MetricsSink { cfg := f.CreateDefaultConfig().(*signalfxreceiver.Config) cfg.Endpoint = fmt.Sprintf("0.0.0.0:%d", port) - rcvr, err := f.CreateMetricsReceiver(context.Background(), receivertest.NewNopSettings(), cfg, mc) + rcvr, err := f.CreateMetrics(context.Background(), receivertest.NewNopSettings(), cfg, mc) require.NoError(t, err) require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost())) @@ -258,6 +258,7 @@ func testHistogramMetrics(t *testing.T) { pmetrictest.IgnoreResourceAttributeValue("net.host.port"), pmetrictest.IgnoreResourceMetricsOrder(), pmetrictest.IgnoreScopeMetricsOrder(), + pmetrictest.IgnoreScopeVersion(), pmetrictest.IgnoreMetricsOrder(), pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreSubsequentDataPoints("coredns_forward_request_duration_seconds"), @@ -288,6 +289,7 @@ func testHistogramMetrics(t *testing.T) { pmetrictest.IgnoreMetricAttributeValue("net.host.port"), pmetrictest.IgnoreResourceMetricsOrder(), pmetrictest.IgnoreScopeMetricsOrder(), + pmetrictest.IgnoreScopeVersion(), pmetrictest.IgnoreMetricsOrder(), pmetrictest.IgnoreMetricDataPointsOrder(), ) @@ -316,6 +318,7 @@ func testHistogramMetrics(t *testing.T) { pmetrictest.IgnoreMetricAttributeValue("server_go_version", "etcd_server_go_version"), pmetrictest.IgnoreResourceMetricsOrder(), pmetrictest.IgnoreScopeMetricsOrder(), + pmetrictest.IgnoreScopeVersion(), pmetrictest.IgnoreMetricsOrder(), pmetrictest.IgnoreMetricDataPointsOrder(), ) @@ -336,6 +339,7 @@ func testHistogramMetrics(t *testing.T) { pmetrictest.IgnoreMetricAttributeValue("net.host.port"), pmetrictest.IgnoreResourceMetricsOrder(), pmetrictest.IgnoreScopeMetricsOrder(), + pmetrictest.IgnoreScopeVersion(), 
pmetrictest.IgnoreMetricsOrder(), pmetrictest.IgnoreMetricDataPointsOrder(), ) @@ -364,6 +368,7 @@ func testHistogramMetrics(t *testing.T) { pmetrictest.IgnoreResourceAttributeValue("server.port"), pmetrictest.IgnoreResourceMetricsOrder(), pmetrictest.IgnoreScopeMetricsOrder(), + pmetrictest.IgnoreScopeVersion(), pmetrictest.IgnoreMetricsOrder(), pmetrictest.IgnoreMetricDataPointsOrder(), ) @@ -394,6 +399,7 @@ func testHistogramMetrics(t *testing.T) { pmetrictest.IgnoreResourceAttributeValue("net.host.port"), pmetrictest.IgnoreResourceMetricsOrder(), pmetrictest.IgnoreScopeMetricsOrder(), + pmetrictest.IgnoreScopeVersion(), pmetrictest.IgnoreMetricsOrder(), pmetrictest.IgnoreMetricDataPointsOrder(), ) diff --git a/functional_tests/testdata/expected_kind_values/expected_dotnet_traces.yaml b/functional_tests/testdata/expected_kind_values/expected_dotnet_traces.yaml index 91189e355..c9e825cfd 100644 --- a/functional_tests/testdata/expected_kind_values/expected_dotnet_traces.yaml +++ b/functional_tests/testdata/expected_kind_values/expected_dotnet_traces.yaml @@ -3,13 +3,28 @@ resourceSpans: attributes: - key: splunk.distro.version value: - stringValue: 1.4.0 + stringValue: 1.7.0 - key: telemetry.distro.name value: stringValue: splunk-otel-dotnet - key: telemetry.distro.version value: - stringValue: 1.4.0 + stringValue: 1.7.0 + - key: os.type + value: + stringValue: linux + - key: os.description + value: + stringValue: Alpine Linux v3.19 + - key: os.build_id + value: + stringValue: 6.8.0-1014-azure + - key: os.name + value: + stringValue: Alpine Linux + - key: os.version + value: + stringValue: 3.19.1 - key: host.name value: stringValue: kind-control-plane @@ -30,7 +45,7 @@ resourceSpans: stringValue: 8.0.3 - key: container.id value: - stringValue: efe5e5c4a3ff4cb3620cf1ea1b6210aadc0d78592a001ec6ae52dceacb4a3fbd + stringValue: 66462f58bce1e69e6badf3bd51a4cae560f578e09571acc36810eb0c757dc07e - key: telemetry.sdk.name value: stringValue: opentelemetry @@ -39,13 +54,13 @@ 
resourceSpans: stringValue: dotnet - key: telemetry.sdk.version value: - stringValue: 1.7.0 + stringValue: 1.9.0 - key: service.name value: stringValue: dotnet-test - key: splunk.zc.method value: - stringValue: splunk-otel-dotnet:v1.4.0 + stringValue: splunk-otel-dotnet:v1.7.0 - key: k8s.container.name value: stringValue: dotnet-test @@ -60,19 +75,19 @@ resourceSpans: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: dotnet-test-764877d877-l94s6 + stringValue: dotnet-test-7fd7cfb786-v88nj - key: k8s.replicaset.name value: - stringValue: dotnet-test-764877d877 + stringValue: dotnet-test-7fd7cfb786 - key: service.version value: stringValue: latest - key: k8s.pod.ip value: - stringValue: 10.244.0.69 + stringValue: 10.244.0.12 - key: k8s.pod.uid value: - stringValue: 290889bb-9f5e-439a-9135-65c604334749 + stringValue: df2380bb-dbf2-4b95-a71a-bb1a865042d9 - key: k8s.pod.labels.app value: stringValue: dotnet-test @@ -82,9 +97,6 @@ resourceSpans: - key: container.image.tag value: stringValue: latest - - key: os.type - value: - stringValue: linux - key: k8s.cluster.name value: stringValue: dev-operator @@ -130,14 +142,15 @@ resourceSpans: - key: http.response.status_code value: intValue: "200" - endTimeUnixNano: "1715969864227389200" + endTimeUnixNano: "1727807785128888700" + flags: 769 kind: 2 name: GET / - parentSpanId: da5dd755f29fad09 - spanId: 0038c8de02b44e52 - startTimeUnixNano: "1715969864226627200" + parentSpanId: "" + spanId: 31108acc858f2148 + startTimeUnixNano: "1727807785128666300" status: {} - traceId: a924e52ebd34a2bb97656d9a60cf8212 + traceId: ce9cd16f34d3616afd09e44c4628b75d - attributes: - key: server.address value: @@ -163,14 +176,15 @@ resourceSpans: - key: http.response.status_code value: intValue: "200" - endTimeUnixNano: "1715969865240829300" + endTimeUnixNano: "1727807786129946900" + flags: 769 kind: 2 name: GET / - parentSpanId: 43dcce3eb67fa6bd - spanId: c567a29214cdf900 - startTimeUnixNano: "1715969865236407900" + 
parentSpanId: "" + spanId: 517249132e5cf8ce + startTimeUnixNano: "1727807786129653900" status: {} - traceId: 361172a0f5dfd7d548694f8ba9fc7b98 + traceId: 816456d9759a79719c07c4ad88d07704 - attributes: - key: server.address value: @@ -196,14 +210,15 @@ resourceSpans: - key: http.response.status_code value: intValue: "200" - endTimeUnixNano: "1715969866248003700" + endTimeUnixNano: "1727807787131099300" + flags: 769 kind: 2 name: GET / - parentSpanId: 17ba1638a4f7c60d - spanId: 073e155b17f07ad9 - startTimeUnixNano: "1715969866247119500" + parentSpanId: "" + spanId: 318c67cd44774f1d + startTimeUnixNano: "1727807787130851000" status: {} - traceId: 7010aa6cf5bcca3700ef756a8a245861 + traceId: ea5610d4f2c4d95b19ae2de6ca99841e - attributes: - key: server.address value: @@ -229,14 +244,15 @@ resourceSpans: - key: http.response.status_code value: intValue: "200" - endTimeUnixNano: "1715969867251548500" + endTimeUnixNano: "1727807788132273200" + flags: 769 kind: 2 name: GET / - parentSpanId: 72e40086b5b0decd - spanId: 0cbfe346413e4bd2 - startTimeUnixNano: "1715969867250849200" + parentSpanId: "" + spanId: e81aafb08463c8d3 + startTimeUnixNano: "1727807788132032800" status: {} - traceId: d18cbfbb3ebf052ed5ed379787523eea + traceId: 58b393232007b5c9969b7f14e3b724eb - attributes: - key: server.address value: @@ -262,14 +278,15 @@ resourceSpans: - key: http.response.status_code value: intValue: "200" - endTimeUnixNano: "1715969868255270900" + endTimeUnixNano: "1727807789133394300" + flags: 769 kind: 2 name: GET / - parentSpanId: 3f5a1f8c161e1850 - spanId: 4ac4972d600be731 - startTimeUnixNano: "1715969868254656300" + parentSpanId: "" + spanId: 46b3491b73f3cdd5 + startTimeUnixNano: "1727807789133067800" status: {} - traceId: ddbc75950dd2f4469d767a7ce569d3cd + traceId: fe1b7b58ca8832523a6f6bdfc7111297 - scope: name: System.Net.Http spans: @@ -292,14 +309,15 @@ resourceSpans: - key: http.response.status_code value: intValue: "200" - endTimeUnixNano: "1715969864228877600" + 
endTimeUnixNano: "1727807785128911400" + flags: 257 kind: 3 name: GET parentSpanId: "" - spanId: da5dd755f29fad09 - startTimeUnixNano: "1715969864225322300" + spanId: 9973276344d39e0a + startTimeUnixNano: "1727807785128450300" status: {} - traceId: a924e52ebd34a2bb97656d9a60cf8212 + traceId: ce9cd16f34d3616afd09e44c4628b75d - attributes: - key: http.request.method value: @@ -319,14 +337,15 @@ resourceSpans: - key: http.response.status_code value: intValue: "200" - endTimeUnixNano: "1715969865241549900" + endTimeUnixNano: "1727807786130112300" + flags: 257 kind: 3 name: GET parentSpanId: "" - spanId: 43dcce3eb67fa6bd - startTimeUnixNano: "1715969865232068500" + spanId: 39672ff972cb5645 + startTimeUnixNano: "1727807786129389800" status: {} - traceId: 361172a0f5dfd7d548694f8ba9fc7b98 + traceId: 816456d9759a79719c07c4ad88d07704 - attributes: - key: http.request.method value: @@ -346,14 +365,15 @@ resourceSpans: - key: http.response.status_code value: intValue: "200" - endTimeUnixNano: "1715969866248180700" + endTimeUnixNano: "1727807787131368300" + flags: 257 kind: 3 name: GET parentSpanId: "" - spanId: 17ba1638a4f7c60d - startTimeUnixNano: "1715969866246343600" + spanId: 37cc2176bac85907 + startTimeUnixNano: "1727807787130672200" status: {} - traceId: 7010aa6cf5bcca3700ef756a8a245861 + traceId: ea5610d4f2c4d95b19ae2de6ca99841e - attributes: - key: http.request.method value: @@ -373,14 +393,15 @@ resourceSpans: - key: http.response.status_code value: intValue: "200" - endTimeUnixNano: "1715969867251664300" + endTimeUnixNano: "1727807788132407800" + flags: 257 kind: 3 name: GET parentSpanId: "" - spanId: 72e40086b5b0decd - startTimeUnixNano: "1715969867250062200" + spanId: ab1251eadd532e02 + startTimeUnixNano: "1727807788131789800" status: {} - traceId: d18cbfbb3ebf052ed5ed379787523eea + traceId: 58b393232007b5c9969b7f14e3b724eb - attributes: - key: http.request.method value: @@ -400,11 +421,12 @@ resourceSpans: - key: http.response.status_code value: intValue: "200" - 
endTimeUnixNano: "1715969868255724700" + endTimeUnixNano: "1727807789133486500" + flags: 257 kind: 3 name: GET parentSpanId: "" - spanId: 3f5a1f8c161e1850 - startTimeUnixNano: "1715969868253213300" + spanId: 9f4c3f7b28b30de1 + startTimeUnixNano: "1727807789132858200" status: {} - traceId: ddbc75950dd2f4469d767a7ce569d3cd + traceId: fe1b7b58ca8832523a6f6bdfc7111297 diff --git a/functional_tests/testdata/expected_kind_values/expected_internal_metrics.yaml b/functional_tests/testdata/expected_kind_values/expected_internal_metrics.yaml index 549ea58ae..38435e578 100644 --- a/functional_tests/testdata/expected_kind_values/expected_internal_metrics.yaml +++ b/functional_tests/testdata/expected_kind_values/expected_internal_metrics.yaml @@ -2,11 +2,9 @@ resourceMetrics: - resource: {} scopeMetrics: - metrics: - - name: otelcol_processor_dropped_log_records - sum: - aggregationTemporality: 2 + - gauge: dataPoints: - - asDouble: 0 + - asDouble: 2 attributes: - key: cluster_name value: @@ -34,10 +32,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -47,9 +45,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: processor + - key: server.address value: - stringValue: memory_limiter + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -58,20 +59,23 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 
+ - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - name: otelcol_processor_dropped_spans + name: otelcol_fileconsumer_reading_files + - name: otelcol_otelsvc_k8s_pod_updated sum: aggregationTemporality: 2 dataPoints: - - asDouble: 0 + - asDouble: 112 attributes: - key: cluster_name value: @@ -99,10 +103,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -112,9 +116,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: processor + - key: server.address value: - stringValue: memory_limiter + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -123,20 +130,23 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" isMonotonic: true - - name: otelcol_receiver_accepted_spans + - name: otelcol_process_uptime sum: aggregationTemporality: 2 dataPoints: - - asDouble: 96 + - asDouble: 110.707364179 attributes: - key: cluster_name value: @@ -164,10 +174,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name 
value: stringValue: 172.18.0.2 @@ -177,9 +187,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: server.address value: - stringValue: otlp + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -188,21 +201,19 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 - - key: transport + stringValue: v0.111.0 + - key: url.scheme value: - stringValue: grpc + stringValue: http timeUnixNano: "1000000" isMonotonic: true - - name: otelcol_receiver_refused_spans - sum: - aggregationTemporality: 2 + - gauge: dataPoints: - asDouble: 0 attributes: @@ -215,6 +226,12 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 + - key: data_type + value: + stringValue: logs + - key: exporter + value: + stringValue: otlphttp/entities - key: host.name value: stringValue: kind-control-plane @@ -232,10 +249,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -245,9 +262,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: server.address value: - stringValue: otlp + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -256,21 +276,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 
015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 - - key: transport + stringValue: v0.111.0 + - key: url.scheme value: - stringValue: grpc + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 96 + - asDouble: 0 attributes: - key: cluster_name value: @@ -281,6 +298,12 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 + - key: data_type + value: + stringValue: logs + - key: exporter + value: + stringValue: splunk_hec/platform_logs - key: host.name value: stringValue: kind-control-plane @@ -298,10 +321,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -311,19 +334,32 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 - key: service.name value: stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - name: scrape_series_added - - name: otelcol_exporter_sent_metric_points - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 5985 + - asDouble: 0 attributes: - key: cluster_name value: @@ -334,6 +370,9 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 + - key: data_type + value: + stringValue: metrics - key: exporter value: stringValue: 
signalfx @@ -354,10 +393,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -367,6 +406,12 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -375,15 +420,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 5985 + - asDouble: 0 attributes: - key: cluster_name value: @@ -394,6 +442,9 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 + - key: data_type + value: + stringValue: metrics - key: exporter value: stringValue: splunk_hec/platform_metrics @@ -414,10 +465,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -427,6 +478,12 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -435,20 +492,18 @@ 
resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - name: otelcol_otelsvc_k8s_namespace_added - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 36 + - asDouble: 0 attributes: - key: cluster_name value: @@ -459,6 +514,12 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 + - key: data_type + value: + stringValue: traces + - key: exporter + value: + stringValue: otlp - key: host.name value: stringValue: kind-control-plane @@ -476,10 +537,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -489,6 +550,12 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -497,18 +564,23 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - gauge: + name: otelcol_exporter_queue_size + - name: otelcol_otelsvc_k8s_ip_lookup_miss + sum: + aggregationTemporality: 2 
dataPoints: - - asDouble: 0.008026458 + - asDouble: 726 attributes: - key: cluster_name value: @@ -536,10 +608,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -549,17 +621,35 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 - key: service.name value: stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - name: scrape_duration_seconds + isMonotonic: true - gauge: dataPoints: - - asDouble: 96 + - asDouble: 4.22756352e+08 attributes: - key: cluster_name value: @@ -587,10 +677,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -600,17 +690,37 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 - key: service.name value: stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 
015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - name: scrape_samples_post_metric_relabeling - - gauge: + name: otelcol_process_memory_rss + - name: otelcol_processor_filter_logs_filtered + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 1000 + - asDouble: 150 attributes: - key: cluster_name value: @@ -621,9 +731,9 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 - - key: exporter + - key: filter value: - stringValue: otlp + stringValue: filter/logs - key: host.name value: stringValue: kind-control-plane @@ -641,10 +751,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -654,6 +764,12 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -662,14 +778,20 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" + isMonotonic: true + - gauge: + dataPoints: - asDouble: 1000 attributes: - key: cluster_name @@ -683,7 +805,7 @@ resourceMetrics: stringValue: customvalue2 - key: exporter value: - stringValue: signalfx + stringValue: otlp - key: 
host.name value: stringValue: kind-control-plane @@ -701,10 +823,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -714,6 +836,12 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -722,15 +850,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 5000 + - asDouble: 1000 attributes: - key: cluster_name value: @@ -743,7 +874,7 @@ resourceMetrics: stringValue: customvalue2 - key: exporter value: - stringValue: splunk_hec/platform_logs + stringValue: otlphttp/entities - key: host.name value: stringValue: kind-control-plane @@ -761,10 +892,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -774,6 +905,12 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: 
stringValue: 172.18.0.2:8889 @@ -782,15 +919,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 5000 + - asDouble: 1000 attributes: - key: cluster_name value: @@ -803,7 +943,7 @@ resourceMetrics: stringValue: customvalue2 - key: exporter value: - stringValue: splunk_hec/platform_metrics + stringValue: signalfx - key: host.name value: stringValue: kind-control-plane @@ -821,10 +961,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -834,6 +974,12 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -842,18 +988,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - name: otelcol_exporter_queue_capacity - - gauge: - dataPoints: - - asDouble: 0 + - asDouble: 1000 attributes: - key: cluster_name value: @@ -866,7 +1012,7 @@ resourceMetrics: stringValue: customvalue2 - key: exporter value: - stringValue: otlp 
+ stringValue: splunk_hec/platform_logs - key: host.name value: stringValue: kind-control-plane @@ -884,10 +1030,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -897,6 +1043,12 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -905,15 +1057,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1000 attributes: - key: cluster_name value: @@ -926,7 +1081,7 @@ resourceMetrics: stringValue: customvalue2 - key: exporter value: - stringValue: signalfx + stringValue: splunk_hec/platform_metrics - key: host.name value: stringValue: kind-control-plane @@ -944,10 +1099,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -957,6 +1112,12 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: 
"8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -965,15 +1126,23 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + name: otelcol_exporter_queue_capacity + - name: otelcol_processor_incoming_items + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 10300 attributes: - key: cluster_name value: @@ -984,9 +1153,6 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 - - key: exporter - value: - stringValue: splunk_hec/platform_logs - key: host.name value: stringValue: kind-control-plane @@ -1004,10 +1170,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1017,6 +1183,18 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: otel_signal + value: + stringValue: logs + - key: processor + value: + stringValue: filter/logs + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -1025,15 +1203,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + 
stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 10336 attributes: - key: cluster_name value: @@ -1044,9 +1225,6 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 - - key: exporter - value: - stringValue: splunk_hec/platform_metrics - key: host.name value: stringValue: kind-control-plane @@ -1064,10 +1242,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1077,6 +1255,18 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: otel_signal + value: + stringValue: logs + - key: processor + value: + stringValue: k8sattributes + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -1085,20 +1275,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - name: otelcol_exporter_queue_size - - name: otelcol_exporter_send_failed_log_records - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 121 + - asDouble: 10336 attributes: - key: cluster_name value: @@ -1109,9 +1297,6 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 - - key: exporter - value: - stringValue: splunk_hec/platform_logs - key: host.name value: stringValue: kind-control-plane @@ -1129,10 +1314,10 @@ resourceMetrics: stringValue: kind-control-plane - key: 
k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1142,6 +1327,18 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: otel_signal + value: + stringValue: logs + - key: processor + value: + stringValue: memory_limiter + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -1150,20 +1347,2536 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 10186 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: logs + - key: processor + value: + 
stringValue: resource + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 10186 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: logs + - key: processor + value: + stringValue: resource/add_environment + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: 
"1000000" + - asDouble: 10186 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: logs + - key: processor + value: + stringValue: resource/logs + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 10186 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: 
k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: logs + - key: processor + value: + stringValue: resourcedetection + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 11793 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: metrics + - key: processor + value: + stringValue: attributes/istio + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: 
"8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 12796 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: metrics + - key: processor + value: + stringValue: k8sattributes/metrics + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 12796 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 
+ value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: metrics + - key: processor + value: + stringValue: memory_limiter + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 12796 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 
56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: metrics + - key: processor + value: + stringValue: resource + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 1003 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: metrics + - key: processor + value: + stringValue: resource/add_agent_k8s + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: 
otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 11793 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: metrics + - key: processor + value: + stringValue: resource/add_environment + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 12796 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + 
value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: metrics + - key: processor + value: + stringValue: resourcedetection + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 309 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + 
value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: traces + - key: processor + value: + stringValue: k8sattributes + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 309 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: traces + - key: processor + value: + stringValue: memory_limiter + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + 
value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 309 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: traces + - key: processor + value: + stringValue: resource + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 309 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: 
dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: traces + - key: processor + value: + stringValue: resource/add_environment + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 309 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: otel_signal + value: + stringValue: traces + - key: 
processor + value: + stringValue: resourcedetection + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + isMonotonic: true + - name: otelcol_receiver_accepted_metric_points + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 4187 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: receiver + value: + stringValue: hostmetrics + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + 
stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 5791 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: receiver + value: + stringValue: kubeletstats + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 85 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: 
kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: receiver + value: + stringValue: otlp + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: transport + value: + stringValue: http + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 1003 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: receiver + value: + stringValue: prometheus/agent + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + 
value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: transport + value: + stringValue: http + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 39 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: receiver + value: + stringValue: prometheus/ta + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: transport + value: + stringValue: http + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 104 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: 
+ stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: receiver + value: + stringValue: prometheus_simple//receiver_creator{endpoint="10.244.0.11:80"}/k8s_observer/28965b31-2355-46de-9e1f-3c0b31c1ef5a + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: transport + value: + stringValue: http + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 286 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: 
sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: receiver + value: + stringValue: prometheus_simple//receiver_creator{endpoint="10.244.0.6:9402"}/k8s_observer/3101f943-bd4a-437f-93e3-e17342c985ff + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: transport + value: + stringValue: http + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 92 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: receiver + value: + stringValue: smartagent/coredns/receiver_creator{endpoint="10.244.0.2"}/k8s_observer/c1b617cc-4467-4277-a38c-08853d9aba1f + - key: 
server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: transport + value: + stringValue: internal + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 87 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: receiver + value: + stringValue: smartagent/coredns/receiver_creator{endpoint="10.244.0.4"}/k8s_observer/616ee593-bf8f-4ec0-abf4-910da6618cfa + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: 
transport + value: + stringValue: internal + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 1122 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: receiver + value: + stringValue: smartagent/kubernetes-proxy/receiver_creator{endpoint="172.18.0.2"}/k8s_observer/5053b5b4-54cb-4efc-b19b-7d029ff98930 + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: transport + value: + stringValue: internal + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + isMonotonic: true + - name: otelcol_scraper_errored_metric_points + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: 
customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: receiver + value: + stringValue: hostmetrics + - key: scraper + value: + stringValue: hostmetrics + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 0 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - 
key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: receiver + value: + stringValue: kubeletstats + - key: scraper + value: + stringValue: kubeletstats + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 0.002243876 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - 
key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + name: scrape_duration_seconds + - name: otelcol_exporter_send_failed_metric_points + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: exporter + value: + stringValue: signalfx + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 0 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: exporter + value: + stringValue: splunk_hec/platform_metrics + - key: host.name + value: + stringValue: 
kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" isMonotonic: true - - name: otelcol_otelsvc_k8s_ip_lookup_miss + - gauge: + dataPoints: + - asDouble: 26 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: 
+ stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + name: otelcol_fileconsumer_open_files + - name: otelcol_processor_accepted_log_records + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 10336 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: processor + value: + stringValue: memory_limiter + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + 
stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + isMonotonic: true + - name: otelcol_receiver_refused_metric_points sum: aggregationTemporality: 2 dataPoints: - - asDouble: 357 + - asDouble: 0 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: receiver + value: + stringValue: hostmetrics + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 0 + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: http + - key: k8s.cluster.name + value: + 
stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-f8bq4 + - key: k8s.pod.uid + value: + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a + - key: net.host.name + value: + stringValue: 172.18.0.2 + - key: net.host.port + value: + stringValue: "8889" + - key: os.type + value: + stringValue: linux + - key: receiver + value: + stringValue: kubeletstats + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" + - key: service.instance.id + value: + stringValue: 172.18.0.2:8889 + - key: service.name + value: + stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http + timeUnixNano: "1000000" + - asDouble: 0 attributes: - key: cluster_name value: @@ -1191,10 +3904,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1204,6 +3917,15 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: receiver + value: + stringValue: otlp + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -1212,18 +3934,21 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - 
key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: transport + value: + stringValue: http + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 4.94807416e+08 + - asDouble: 0 attributes: - key: cluster_name value: @@ -1251,10 +3976,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1264,6 +3989,15 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: receiver + value: + stringValue: prometheus/agent + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -1272,19 +4006,20 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: transport + value: + stringValue: http + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - name: otelcol_process_runtime_total_sys_memory_bytes - - name: otelcol_processor_refused_spans - sum: - aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: cluster_name @@ -1313,10 +4048,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: 
d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1326,9 +4061,15 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: processor + - key: receiver value: - stringValue: memory_limiter + stringValue: prometheus/ta + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -1337,18 +4078,21 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: transport + value: + stringValue: http + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 96 + - asDouble: 0 attributes: - key: cluster_name value: @@ -1376,10 +4120,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1389,19 +4133,38 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: receiver + value: + stringValue: prometheus_simple//receiver_creator{endpoint="10.244.0.11:80"}/k8s_observer/28965b31-2355-46de-9e1f-3c0b31c1ef5a + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 - key: service.name value: stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 
015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: transport + value: + stringValue: http + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - name: scrape_samples_scraped - - name: otelcol_exporter_sent_spans - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 96 + - asDouble: 0 attributes: - key: cluster_name value: @@ -1412,9 +4175,6 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 - - key: exporter - value: - stringValue: otlp - key: host.name value: stringValue: kind-control-plane @@ -1432,10 +4192,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1445,6 +4205,15 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: receiver + value: + stringValue: prometheus_simple//receiver_creator{endpoint="10.244.0.6:9402"}/k8s_observer/3101f943-bd4a-437f-93e3-e17342c985ff + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -1453,20 +4222,21 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: transport + value: + stringValue: http + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - name: otelcol_otelsvc_k8s_pod_updated - sum: - aggregationTemporality: 
2 - dataPoints: - - asDouble: 180 + - asDouble: 0 attributes: - key: cluster_name value: @@ -1494,10 +4264,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1507,6 +4277,15 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: receiver + value: + stringValue: smartagent/coredns/receiver_creator{endpoint="10.244.0.2"}/k8s_observer/c1b617cc-4467-4277-a38c-08853d9aba1f + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -1515,20 +4294,21 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: transport + value: + stringValue: internal + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - name: otelcol_process_cpu_seconds - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 2.1799999999999997 + - asDouble: 0 attributes: - key: cluster_name value: @@ -1556,10 +4336,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1569,6 +4349,15 @@ resourceMetrics: - key: os.type value: stringValue: 
linux + - key: receiver + value: + stringValue: smartagent/coredns/receiver_creator{endpoint="10.244.0.4"}/k8s_observer/616ee593-bf8f-4ec0-abf4-910da6618cfa + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -1577,20 +4366,21 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: transport + value: + stringValue: internal + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - name: otelcol_processor_accepted_spans - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 96 + - asDouble: 0 attributes: - key: cluster_name value: @@ -1618,10 +4408,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1631,9 +4421,15 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: processor + - key: receiver value: - stringValue: memory_limiter + stringValue: smartagent/kubernetes-proxy/receiver_creator{endpoint="172.18.0.2"}/k8s_observer/5053b5b4-54cb-4efc-b19b-7d029ff98930 + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -1642,16 +4438,22 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 
015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: transport + value: + stringValue: internal + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" isMonotonic: true - - name: otelcol_processor_refused_log_records + - name: otelcol_receiver_refused_spans sum: aggregationTemporality: 2 dataPoints: @@ -1683,10 +4485,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1696,9 +4498,15 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: processor + - key: receiver value: - stringValue: memory_limiter + stringValue: otlp + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -1707,19 +4515,20 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: transport + value: + stringValue: grpc + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - name: otelcol_receiver_refused_log_records - sum: - aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: cluster_name @@ -1748,10 +4557,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - 
key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1763,7 +4572,13 @@ resourceMetrics: stringValue: linux - key: receiver value: - stringValue: filelog + stringValue: otlp + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -1772,15 +4587,26 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: transport + value: + stringValue: http + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - name: otelcol_exporter_sent_log_records + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 10186 attributes: - key: cluster_name value: @@ -1791,6 +4617,9 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 + - key: exporter + value: + stringValue: splunk_hec/platform_logs - key: host.name value: stringValue: kind-control-plane @@ -1808,10 +4637,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1821,9 +4650,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: journald/containerd + stringValue: "8889" - key: service.instance.id value: 
stringValue: 172.18.0.2:8889 @@ -1832,15 +4664,23 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - name: otelcol_exporter_sent_spans + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 309 attributes: - key: cluster_name value: @@ -1851,6 +4691,9 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 + - key: exporter + value: + stringValue: otlp - key: host.name value: stringValue: kind-control-plane @@ -1868,10 +4711,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1881,9 +4724,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: journald/kubelet + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -1892,20 +4738,23 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" isMonotonic: true - - name: otelcol_scraper_scraped_metric_points + - name: 
otelcol_otelsvc_k8s_pod_added sum: aggregationTemporality: 2 dataPoints: - - asDouble: 7 + - asDouble: 104 attributes: - key: cluster_name value: @@ -1933,10 +4782,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -1946,12 +4795,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: server.address value: - stringValue: hostmetrics - - key: scraper + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: cpu + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -1960,15 +4809,23 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 49 + isMonotonic: true + - name: otelcol_receiver_refused_log_records + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 attributes: - key: cluster_name value: @@ -1996,10 +4853,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2011,10 +4868,13 @@ resourceMetrics: stringValue: linux - key: receiver value: - stringValue: hostmetrics - - key: scraper + stringValue: filelog + 
- key: server.address value: - stringValue: disk + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -2023,15 +4883,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 14 + - asDouble: 0 attributes: - key: cluster_name value: @@ -2059,10 +4922,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2074,10 +4937,13 @@ resourceMetrics: stringValue: linux - key: receiver value: - stringValue: hostmetrics - - key: scraper + stringValue: otlp + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: filesystem + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -2086,15 +4952,26 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: transport + value: + stringValue: http + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 21 + isMonotonic: true + - name: otelcol_exporter_send_failed_log_records + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 
attributes: - key: cluster_name value: @@ -2105,6 +4982,9 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 + - key: exporter + value: + stringValue: splunk_hec/platform_logs - key: host.name value: stringValue: kind-control-plane @@ -2122,10 +5002,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2135,12 +5015,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: server.address value: - stringValue: hostmetrics - - key: scraper + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: load + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -2149,15 +5029,23 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 7 + isMonotonic: true + - name: otelcol_exporter_send_failed_spans + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 attributes: - key: cluster_name value: @@ -2168,6 +5056,9 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 + - key: exporter + value: + stringValue: otlp - key: host.name value: stringValue: kind-control-plane @@ -2185,10 +5076,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - 
stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2198,12 +5089,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: server.address value: - stringValue: hostmetrics - - key: scraper + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: memory + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -2212,15 +5103,23 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 35 + isMonotonic: true + - name: otelcol_exporter_sent_metric_points + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 12796 attributes: - key: cluster_name value: @@ -2231,6 +5130,9 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 + - key: exporter + value: + stringValue: signalfx - key: host.name value: stringValue: kind-control-plane @@ -2248,10 +5150,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2261,12 +5163,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: server.address value: - stringValue: hostmetrics - - key: scraper + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: network + stringValue: "8889" - key: service.instance.id value: stringValue: 
172.18.0.2:8889 @@ -2275,15 +5177,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 21 + - asDouble: 12796 attributes: - key: cluster_name value: @@ -2294,6 +5199,9 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 + - key: exporter + value: + stringValue: splunk_hec/platform_metrics - key: host.name value: stringValue: kind-control-plane @@ -2311,10 +5219,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2324,12 +5232,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: server.address value: - stringValue: hostmetrics - - key: scraper + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: paging + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -2338,15 +5246,23 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 14 + isMonotonic: true + - name: otelcol_otelsvc_k8s_namespace_added + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 36 attributes: 
- key: cluster_name value: @@ -2374,10 +5290,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2387,12 +5303,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: server.address value: - stringValue: hostmetrics - - key: scraper + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: processes + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -2401,15 +5317,23 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 2843 + isMonotonic: true + - name: otelcol_receiver_accepted_spans + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 76 attributes: - key: cluster_name value: @@ -2437,10 +5361,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2452,10 +5376,13 @@ resourceMetrics: stringValue: linux - key: receiver value: - stringValue: kubeletstats - - key: scraper + stringValue: otlp + - key: server.address value: - stringValue: kubeletstats + stringValue: 172.18.0.2 + - key: server.port + value: + 
stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -2464,18 +5391,21 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: transport + value: + stringValue: grpc + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 73 + - asDouble: 233 attributes: - key: cluster_name value: @@ -2503,10 +5433,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2516,6 +5446,15 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: receiver + value: + stringValue: otlp + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -2524,18 +5463,24 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: transport + value: + stringValue: http + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - name: otelcol_otelsvc_k8s_pod_table_size + isMonotonic: true - gauge: dataPoints: - - asDouble: 2.91491872e+08 + - asDouble: 106 attributes: - key: cluster_name value: @@ -2563,10 +5508,10 @@ 
resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2576,6 +5521,12 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -2584,20 +5535,21 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - name: otelcol_process_runtime_heap_alloc_bytes - - name: otelcol_processor_accepted_metric_points - sum: - aggregationTemporality: 2 + name: scrape_series_added + - gauge: dataPoints: - - asDouble: 5985 + - asDouble: 85 attributes: - key: cluster_name value: @@ -2625,10 +5577,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2638,9 +5590,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: processor + - key: server.address value: - stringValue: memory_limiter + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -2649,20 +5604,23 @@ 
resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - name: otelcol_processor_refused_metric_points + name: otelcol_otelsvc_k8s_pod_table_size + - name: otelcol_process_cpu_seconds sum: aggregationTemporality: 2 dataPoints: - - asDouble: 0 + - asDouble: 3.38 attributes: - key: cluster_name value: @@ -2690,10 +5648,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2703,9 +5661,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: processor + - key: server.address value: - stringValue: memory_limiter + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -2714,20 +5675,21 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" isMonotonic: true - - name: otelcol_receiver_accepted_log_records - sum: - aggregationTemporality: 2 + - gauge: dataPoints: - - asDouble: 405 + - asDouble: 2.98417784e+08 attributes: - key: cluster_name value: @@ -2755,10 +5717,10 @@ resourceMetrics: 
stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2768,9 +5730,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: server.address value: - stringValue: filelog + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -2779,15 +5744,23 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 392 + name: otelcol_process_runtime_heap_alloc_bytes + - name: otelcol_process_runtime_total_alloc_bytes + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 9.76760176e+08 attributes: - key: cluster_name value: @@ -2815,10 +5788,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2828,9 +5801,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: journald/containerd + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -2839,15 +5815,23 @@ 
resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 193 + isMonotonic: true + - name: otelcol_processor_accepted_spans + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 309 attributes: - key: cluster_name value: @@ -2875,10 +5859,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2888,9 +5872,15 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: processor + value: + stringValue: memory_limiter + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: journald/kubelet + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -2899,20 +5889,23 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" isMonotonic: true - - name: otelcol_receiver_accepted_metric_points + - name: otelcol_scraper_scraped_metric_points sum: aggregationTemporality: 2 dataPoints: - - asDouble: 2326 + - asDouble: 264 attributes: - key: cluster_name value: @@ -2940,10 +5933,10 @@ resourceMetrics: 
stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -2956,6 +5949,15 @@ resourceMetrics: - key: receiver value: stringValue: hostmetrics + - key: scraper + value: + stringValue: hostmetrics + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -2964,15 +5966,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 3099 + - asDouble: 5267 attributes: - key: cluster_name value: @@ -3000,10 +6005,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -3016,6 +6021,15 @@ resourceMetrics: - key: receiver value: stringValue: kubeletstats + - key: scraper + value: + stringValue: kubeletstats + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -3024,15 +6038,21 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 
+ stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 448 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 106 attributes: - key: cluster_name value: @@ -3060,10 +6080,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -3073,9 +6093,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: server.address value: - stringValue: prometheus/agent + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -3084,18 +6107,21 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 - - key: transport + stringValue: v0.111.0 + - key: url.scheme value: stringValue: http timeUnixNano: "1000000" - - asDouble: 56 + name: scrape_samples_post_metric_relabeling + - gauge: + dataPoints: + - asDouble: 3.2275796e+08 attributes: - key: cluster_name value: @@ -3123,10 +6149,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: 
stringValue: 172.18.0.2 @@ -3136,9 +6162,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: smartagent/coredns/receiver_creator{endpoint="10.244.0.3"}/k8s_observer/000a54ec-c41b-4298-a2e5-1cc477e755d2 + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -3147,18 +6176,23 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 - - key: transport + stringValue: v0.111.0 + - key: url.scheme value: - stringValue: internal + stringValue: http timeUnixNano: "1000000" - - asDouble: 56 + name: otelcol_process_runtime_total_sys_memory_bytes + - name: otelcol_processor_accepted_metric_points + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 12796 attributes: - key: cluster_name value: @@ -3186,10 +6220,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -3199,9 +6233,15 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: processor + value: + stringValue: memory_limiter + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: smartagent/coredns/receiver_creator{endpoint="10.244.0.5"}/k8s_observer/67bf5661-dff3-4ed3-881e-735a6a797aa6 + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -3210,21 +6250,23 @@ resourceMetrics: stringValue: otel-agent 
- key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 - - key: transport + stringValue: v0.111.0 + - key: url.scheme value: - stringValue: internal + stringValue: http timeUnixNano: "1000000" isMonotonic: true - - gauge: + - name: otelcol_processor_outgoing_items + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 9.433088e+06 + - asDouble: 10186 attributes: - key: cluster_name value: @@ -3252,10 +6294,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -3265,6 +6307,18 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: otel_signal + value: + stringValue: logs + - key: processor + value: + stringValue: filter/logs + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -3273,20 +6327,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - name: otelcol_process_memory_rss - - name: otelcol_process_runtime_total_alloc_bytes - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 7.31633328e+08 + - asDouble: 10336 attributes: - key: cluster_name value: @@ -3314,10 +6366,10 @@ resourceMetrics: 
stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -3327,6 +6379,18 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: otel_signal + value: + stringValue: logs + - key: processor + value: + stringValue: k8sattributes + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -3335,20 +6399,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - name: otelcol_process_uptime - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 70.312969005 + - asDouble: 10336 attributes: - key: cluster_name value: @@ -3376,10 +6438,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -3389,6 +6451,18 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: otel_signal + value: + stringValue: logs + - key: processor + value: + stringValue: memory_limiter + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: 
service.instance.id value: stringValue: 172.18.0.2:8889 @@ -3397,20 +6471,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - name: otelcol_receiver_refused_metric_points - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 0 + - asDouble: 10186 attributes: - key: cluster_name value: @@ -3438,10 +6510,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -3451,9 +6523,18 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: otel_signal + value: + stringValue: logs + - key: processor + value: + stringValue: resource + - key: server.address value: - stringValue: hostmetrics + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -3462,15 +6543,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 10186 attributes: - key: cluster_name value: @@ -3498,10 +6582,10 @@ resourceMetrics: stringValue: kind-control-plane - key: 
k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -3511,9 +6595,18 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: otel_signal value: - stringValue: kubeletstats + stringValue: logs + - key: processor + value: + stringValue: resource/add_environment + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -3522,15 +6615,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 10186 attributes: - key: cluster_name value: @@ -3558,10 +6654,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -3571,9 +6667,18 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: otel_signal value: - stringValue: prometheus/agent + stringValue: logs + - key: processor + value: + stringValue: resource/logs + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 
172.18.0.2:8889 @@ -3582,18 +6687,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 - - key: transport + stringValue: v0.111.0 + - key: url.scheme value: stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 10186 attributes: - key: cluster_name value: @@ -3621,10 +6726,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -3634,9 +6739,18 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: otel_signal + value: + stringValue: logs + - key: processor + value: + stringValue: resourcedetection + - key: server.address value: - stringValue: smartagent/coredns/receiver_creator{endpoint="10.244.0.3"}/k8s_observer/000a54ec-c41b-4298-a2e5-1cc477e755d2 + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -3645,18 +6759,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 - - key: transport + stringValue: v0.111.0 + - key: url.scheme value: - stringValue: internal + stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 11793 attributes: - key: cluster_name value: @@ -3684,10 +6798,10 @@ resourceMetrics: stringValue: kind-control-plane - key: 
k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -3697,9 +6811,18 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: otel_signal value: - stringValue: smartagent/coredns/receiver_creator{endpoint="10.244.0.5"}/k8s_observer/67bf5661-dff3-4ed3-881e-735a6a797aa6 + stringValue: metrics + - key: processor + value: + stringValue: attributes/istio + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -3708,23 +6831,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 - - key: transport + stringValue: v0.111.0 + - key: url.scheme value: - stringValue: internal + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - name: otelcol_exporter_sent_log_records - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 849 + - asDouble: 12796 attributes: - key: cluster_name value: @@ -3735,9 +6853,6 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 - - key: exporter - value: - stringValue: splunk_hec/platform_logs - key: host.name value: stringValue: kind-control-plane @@ -3755,10 +6870,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 
56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -3768,6 +6883,18 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: otel_signal + value: + stringValue: metrics + - key: processor + value: + stringValue: k8sattributes/metrics + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -3776,20 +6903,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - name: otelcol_processor_filter_logs_filtered - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 20 + - asDouble: 12796 attributes: - key: cluster_name value: @@ -3800,9 +6925,6 @@ resourceMetrics: - key: customfield2 value: stringValue: customvalue2 - - key: filter - value: - stringValue: filter/logs - key: host.name value: stringValue: kind-control-plane @@ -3820,10 +6942,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -3833,6 +6955,18 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: otel_signal + value: + stringValue: metrics + - key: processor + value: + stringValue: memory_limiter + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 
172.18.0.2:8889 @@ -3841,20 +6975,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - name: otelcol_scraper_errored_metric_points - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 0 + - asDouble: 12796 attributes: - key: cluster_name value: @@ -3882,10 +7014,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -3895,12 +7027,18 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: otel_signal value: - stringValue: hostmetrics - - key: scraper + stringValue: metrics + - key: processor + value: + stringValue: resource + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: cpu + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -3909,15 +7047,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1003 attributes: - key: cluster_name value: @@ -3945,10 +7086,10 @@ resourceMetrics: stringValue: kind-control-plane - key: 
k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -3958,12 +7099,18 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: otel_signal value: - stringValue: hostmetrics - - key: scraper + stringValue: metrics + - key: processor + value: + stringValue: resource/add_agent_k8s + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: disk + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -3972,15 +7119,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 11793 attributes: - key: cluster_name value: @@ -4008,10 +7158,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -4021,12 +7171,18 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: otel_signal value: - stringValue: hostmetrics - - key: scraper + stringValue: metrics + - key: processor value: - stringValue: filesystem + stringValue: resource/add_environment + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + 
value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -4035,15 +7191,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 12796 attributes: - key: cluster_name value: @@ -4071,10 +7230,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -4084,12 +7243,18 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: otel_signal value: - stringValue: hostmetrics - - key: scraper + stringValue: metrics + - key: processor + value: + stringValue: resourcedetection + - key: server.address value: - stringValue: load + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -4098,15 +7263,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 309 attributes: - key: cluster_name value: @@ -4134,10 +7302,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: 
sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -4147,12 +7315,18 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: otel_signal value: - stringValue: hostmetrics - - key: scraper + stringValue: traces + - key: processor + value: + stringValue: k8sattributes + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: memory + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -4161,15 +7335,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 309 attributes: - key: cluster_name value: @@ -4197,10 +7374,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -4210,12 +7387,18 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: otel_signal value: - stringValue: hostmetrics - - key: scraper + stringValue: traces + - key: processor + value: + stringValue: memory_limiter + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: network + stringValue: "8889" - key: service.instance.id 
value: stringValue: 172.18.0.2:8889 @@ -4224,15 +7407,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 309 attributes: - key: cluster_name value: @@ -4260,10 +7446,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -4273,12 +7459,18 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: otel_signal value: - stringValue: hostmetrics - - key: scraper + stringValue: traces + - key: processor + value: + stringValue: resource + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port value: - stringValue: paging + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -4287,15 +7479,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 309 attributes: - key: cluster_name value: @@ -4323,10 +7518,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: 
sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -4336,12 +7531,18 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: otel_signal value: - stringValue: hostmetrics - - key: scraper + stringValue: traces + - key: processor value: - stringValue: processes + stringValue: resource/add_environment + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -4350,15 +7551,18 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 309 attributes: - key: cluster_name value: @@ -4386,10 +7590,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -4399,12 +7603,18 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: receiver + - key: otel_signal value: - stringValue: kubeletstats - - key: scraper + stringValue: traces + - key: processor value: - stringValue: kubeletstats + stringValue: resourcedetection + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 
@@ -4413,18 +7623,23 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" isMonotonic: true - - gauge: + - name: otelcol_receiver_accepted_log_records + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 1 + - asDouble: 9966 attributes: - key: cluster_name value: @@ -4452,10 +7667,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -4465,19 +7680,35 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: receiver + value: + stringValue: filelog + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 - key: service.name value: stringValue: otel-agent + - key: service_instance_id + value: + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b + - key: service_name + value: + stringValue: otelcol + - key: service_version + value: + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - name: up - - name: otelcol_otelsvc_k8s_pod_added - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 92 + - asDouble: 370 attributes: - key: cluster_name value: @@ -4505,10 +7736,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: 
sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -4518,6 +7749,15 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: receiver + value: + stringValue: otlp + - key: server.address + value: + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -4526,20 +7766,24 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: transport + value: + stringValue: http + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" isMonotonic: true - - name: otelcol_processor_accepted_log_records - sum: - aggregationTemporality: 2 + - gauge: dataPoints: - - asDouble: 990 + - asDouble: 1 attributes: - key: cluster_name value: @@ -4567,10 +7811,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -4580,9 +7824,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: processor + - key: server.address value: - stringValue: memory_limiter + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -4591,20 +7838,21 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 
7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true - - name: otelcol_processor_dropped_metric_points - sum: - aggregationTemporality: 2 + name: up + - gauge: dataPoints: - - asDouble: 0 + - asDouble: 133 attributes: - key: cluster_name value: @@ -4632,10 +7880,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-tg5xw + stringValue: sock-splunk-otel-collector-agent-f8bq4 - key: k8s.pod.uid value: - stringValue: d220b04c-095f-47c9-bc2b-b784f002548d + stringValue: 56675921-f6f7-4af4-84b8-a4f83ecbc48a - key: net.host.name value: stringValue: 172.18.0.2 @@ -4645,9 +7893,12 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: processor + - key: server.address value: - stringValue: memory_limiter + stringValue: 172.18.0.2 + - key: server.port + value: + stringValue: "8889" - key: service.instance.id value: stringValue: 172.18.0.2:8889 @@ -4656,13 +7907,16 @@ resourceMetrics: stringValue: otel-agent - key: service_instance_id value: - stringValue: 7aa2bc6b-3bad-4bb8-acc5-cb41e55d03e2 + stringValue: 015468d8-5ec1-4072-bda5-3006450d7d5b - key: service_name value: stringValue: otelcol - key: service_version value: - stringValue: v0.92.0 + stringValue: v0.111.0 + - key: url.scheme + value: + stringValue: http timeUnixNano: "1000000" - isMonotonic: true + name: scrape_samples_scraped scope: {} diff --git a/functional_tests/testdata/expected_kind_values/expected_kubeletstats_metrics.yaml b/functional_tests/testdata/expected_kind_values/expected_kubeletstats_metrics.yaml index 2692fbcba..dcd02be16 100644 --- a/functional_tests/testdata/expected_kind_values/expected_kubeletstats_metrics.yaml +++ 
b/functional_tests/testdata/expected_kind_values/expected_kubeletstats_metrics.yaml @@ -40,10 +40,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-5c9d8879fd-zqcdv + stringValue: cert-manager-67c98b89c8-dtt9m - key: k8s.pod.uid value: - stringValue: dd68fc3a-2f3f-440d-a7e7-618aebbf2090 + stringValue: 2a18cace-4258-47cc-8724-68105e50a6b0 - key: os.type value: stringValue: linux @@ -82,10 +82,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-cainjector-6cc9b5f678-zm6qg + stringValue: cert-manager-cainjector-5c5695d979-hz2x4 - key: k8s.pod.uid value: - stringValue: c17bb001-2b34-4173-993b-47cc1d285f3d + stringValue: 4d0463e6-e229-4f66-94f8-07e553dbb1bb - key: os.type value: stringValue: linux @@ -124,10 +124,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-webhook-7bb7b75848-kbgq5 + stringValue: cert-manager-webhook-7f9f8648b9-ghhhm - key: k8s.pod.uid value: - stringValue: a21b8615-7e13-46d7-b4e8-8ae1e4a5c79f + stringValue: 1ebf5a38-5152-430a-80fa-3854278e6a26 - key: os.type value: stringValue: linux @@ -166,10 +166,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: java-test-566495f78-6nhnc + stringValue: dotnet-test-5479c475fc-6bbpg - key: k8s.pod.uid value: - stringValue: fbc84ba5-3b1f-4754-b3f3-76e86eace74e + stringValue: b0a76cb7-8faa-47b3-a0e1-5aeee5544e98 - key: os.type value: stringValue: linux @@ -208,10 +208,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: nodejs-test-d454cf769-n4whd + stringValue: java-test-5c4d6479b8-sdpd7 - key: k8s.pod.uid value: - stringValue: 1f680502-7878-451f-8880-2ca25419ce4d + stringValue: a45051ae-1c81-47c0-8de0-e71dfe319bb4 - key: os.type value: stringValue: linux @@ -250,10 +250,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name 
value: - stringValue: sock-operator-6cf5597fb6-nkvgb + stringValue: nodejs-test-56b74df9ff-blxvb - key: k8s.pod.uid value: - stringValue: 24d849c7-da2c-4598-a307-9deba65ed5a8 + stringValue: eade6268-3958-4711-8153-38c42d26d5e0 - key: os.type value: stringValue: linux @@ -292,10 +292,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-znqzs + stringValue: prometheus-annotation-test-cfc77c7b9-hlvbs - key: k8s.pod.uid value: - stringValue: 3f2777cd-e7ce-485d-a92c-43c0a2aadee5 + stringValue: 7c5e6a26-0c5b-4cc8-ade8-45efc11329d9 - key: os.type value: stringValue: linux @@ -334,10 +334,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-587576d5842876t + stringValue: sock-operator-786c6c9777-2flf7 - key: k8s.pod.uid value: - stringValue: 73bb8d19-b190-42e9-a77d-c84ef587bd02 + stringValue: d4fc43e2-a114-492f-b477-e0936d63dec4 - key: os.type value: stringValue: linux @@ -370,16 +370,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-ttp4s + stringValue: sock-splunk-otel-collector-agent-lhkmv - key: k8s.pod.uid value: - stringValue: 8a841abc-5346-4344-81d4-8fc298823892 + stringValue: 4d535893-9ff1-41a0-95cd-704934c3e539 - key: os.type value: stringValue: linux @@ -412,16 +412,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-x8jcx + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-69d74b99994lz9z - key: k8s.pod.uid value: - stringValue: 662cb1aa-7d77-4a28-813a-3abe808e9a04 + stringValue: 
24a76c39-d3a1-48dd-95cd-7f0196a84087 - key: os.type value: stringValue: linux @@ -454,16 +454,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: etcd-kind-control-plane + stringValue: sock-splunk-otel-collector-ta-7f6c9fdf4-kv6zv - key: k8s.pod.uid value: - stringValue: 8640ede3763d523342ced9ba7a5fd080 + stringValue: 13d32d4e-5a6c-4f79-9f22-4a538e421a3c - key: os.type value: stringValue: linux @@ -502,10 +502,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kindnet-wrk68 + stringValue: coredns-5dd5756b68-5rzdc - key: k8s.pod.uid value: - stringValue: e72734dd-e62d-4528-b718-d79e757c0900 + stringValue: 1e801067-cdf8-4d39-adb7-f04927fcbe4d - key: os.type value: stringValue: linux @@ -544,10 +544,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-apiserver-kind-control-plane + stringValue: coredns-5dd5756b68-n2r77 - key: k8s.pod.uid value: - stringValue: 51acb728ea8dc3a01b43334942491036 + stringValue: e9eab946-2f83-4950-9331-8d2140b9bb27 - key: os.type value: stringValue: linux @@ -586,10 +586,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-controller-manager-kind-control-plane + stringValue: etcd-kind-control-plane - key: k8s.pod.uid value: - stringValue: f98b4cfedb968e3c798bae6ff1faa58d + stringValue: a652c9e3a037b7be2906ed0ac7fea632 - key: os.type value: stringValue: linux @@ -628,10 +628,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-proxy-zmds8 + stringValue: kindnet-v8s6x - key: k8s.pod.uid value: - stringValue: 3252bcd4-1aa0-4a8d-ad6e-fa3cee960076 + stringValue: b2d71d05-0357-4dc5-9630-50a2fdf3b9c8 - key: os.type value: stringValue: linux @@ -670,10 +670,10 @@ resourceMetrics: 
stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-scheduler-kind-control-plane + stringValue: kube-apiserver-kind-control-plane - key: k8s.pod.uid value: - stringValue: 90194206b3ac749a8da43c61fd4761ca + stringValue: 021ea1f4839617eef00a93c39d62616b - key: os.type value: stringValue: linux @@ -706,16 +706,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: local-path-storage + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: local-path-provisioner-6f8956fb48-4bmw5 + stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 6099916d-c417-4145-b1e8-a68a083a4e36 + stringValue: 78e1474ee2ca64de6cd3283ddf989107 - key: os.type value: stringValue: linux @@ -736,7 +736,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -748,16 +748,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-5c9d8879fd-zqcdv + stringValue: kube-proxy-zh9sm - key: k8s.pod.uid value: - stringValue: dd68fc3a-2f3f-440d-a7e7-618aebbf2090 + stringValue: e049ac1e-d79b-4446-a27a-33db00ab5daf - key: os.type value: stringValue: linux @@ -778,7 +778,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -790,16 +790,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-cainjector-6cc9b5f678-zm6qg + 
stringValue: kube-scheduler-kind-control-plane - key: k8s.pod.uid value: - stringValue: c17bb001-2b34-4173-993b-47cc1d285f3d + stringValue: df1f09f3a1f003b8825519a3341f69fe - key: os.type value: stringValue: linux @@ -820,7 +820,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -832,16 +832,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: local-path-storage - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-webhook-7bb7b75848-kbgq5 + stringValue: local-path-provisioner-6f8956fb48-tjnnh - key: k8s.pod.uid value: - stringValue: a21b8615-7e13-46d7-b4e8-8ae1e4a5c79f + stringValue: 1a0fa2b5-48d1-4246-84f6-614a88fc0723 - key: os.type value: stringValue: linux @@ -862,7 +862,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -874,16 +874,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: default + stringValue: ns-w-exclude - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: java-test-566495f78-6nhnc + stringValue: pod-w-index-w-ns-exclude-44q2b - key: k8s.pod.uid value: - stringValue: fbc84ba5-3b1f-4754-b3f3-76e86eace74e + stringValue: 7491b268-a198-4dbc-8ed3-3a27a279a732 - key: os.type value: stringValue: linux @@ -904,7 +904,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -916,16 +916,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: default + stringValue: ns-w-index - key: k8s.node.name value: stringValue: kind-control-plane - key: 
k8s.pod.name value: - stringValue: nodejs-test-d454cf769-n4whd + stringValue: pod-w-exclude-wo-ns-exclude-q7cb2 - key: k8s.pod.uid value: - stringValue: 1f680502-7878-451f-8880-2ca25419ce4d + stringValue: a5fddf35-a751-4dea-b45f-c7558fac023d - key: os.type value: stringValue: linux @@ -946,7 +946,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -958,16 +958,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: default + stringValue: ns-w-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-operator-6cf5597fb6-nkvgb + stringValue: pod-w-index-w-ns-index-j4lrn - key: k8s.pod.uid value: - stringValue: 24d849c7-da2c-4598-a307-9deba65ed5a8 + stringValue: 2c4ad2c5-a791-466c-ae50-f8163efc385c - key: os.type value: stringValue: linux @@ -988,7 +988,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -1000,16 +1000,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: default + stringValue: ns-w-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-znqzs + stringValue: pod-wo-index-w-ns-index-7w4s8 - key: k8s.pod.uid value: - stringValue: 3f2777cd-e7ce-485d-a92c-43c0a2aadee5 + stringValue: 173f0280-eafc-4c11-b149-b30baa2e1958 - key: os.type value: stringValue: linux @@ -1030,7 +1030,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -1042,16 +1042,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: default + stringValue: ns-wo-index - key: k8s.node.name 
value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-587576d5842876t + stringValue: pod-w-index-wo-ns-index-c5xdz - key: k8s.pod.uid value: - stringValue: 73bb8d19-b190-42e9-a77d-c84ef587bd02 + stringValue: 7e87ed20-0fb1-47ad-850e-fd05ff32bf23 - key: os.type value: stringValue: linux @@ -1072,7 +1072,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -1084,16 +1084,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: ns-wo-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-ttp4s + stringValue: pod-wo-index-wo-ns-index-bp8hd - key: k8s.pod.uid value: - stringValue: 8a841abc-5346-4344-81d4-8fc298823892 + stringValue: 2368457e-e367-44b8-90b0-78c751adb967 - key: os.type value: stringValue: linux @@ -1126,16 +1126,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: cert-manager - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-x8jcx + stringValue: cert-manager-67c98b89c8-dtt9m - key: k8s.pod.uid value: - stringValue: 662cb1aa-7d77-4a28-813a-3abe808e9a04 + stringValue: 2a18cace-4258-47cc-8724-68105e50a6b0 - key: os.type value: stringValue: linux @@ -1168,16 +1168,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: cert-manager - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: etcd-kind-control-plane + stringValue: cert-manager-cainjector-5c5695d979-hz2x4 - key: k8s.pod.uid value: - stringValue: 8640ede3763d523342ced9ba7a5fd080 + stringValue: 
4d0463e6-e229-4f66-94f8-07e553dbb1bb - key: os.type value: stringValue: linux @@ -1210,16 +1210,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: cert-manager - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kindnet-wrk68 + stringValue: cert-manager-webhook-7f9f8648b9-ghhhm - key: k8s.pod.uid value: - stringValue: e72734dd-e62d-4528-b718-d79e757c0900 + stringValue: 1ebf5a38-5152-430a-80fa-3854278e6a26 - key: os.type value: stringValue: linux @@ -1252,16 +1252,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-apiserver-kind-control-plane + stringValue: dotnet-test-5479c475fc-6bbpg - key: k8s.pod.uid value: - stringValue: 51acb728ea8dc3a01b43334942491036 + stringValue: b0a76cb7-8faa-47b3-a0e1-5aeee5544e98 - key: os.type value: stringValue: linux @@ -1294,16 +1294,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-controller-manager-kind-control-plane + stringValue: java-test-5c4d6479b8-sdpd7 - key: k8s.pod.uid value: - stringValue: f98b4cfedb968e3c798bae6ff1faa58d + stringValue: a45051ae-1c81-47c0-8de0-e71dfe319bb4 - key: os.type value: stringValue: linux @@ -1336,16 +1336,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-proxy-zmds8 + stringValue: nodejs-test-56b74df9ff-blxvb - key: k8s.pod.uid value: - stringValue: 3252bcd4-1aa0-4a8d-ad6e-fa3cee960076 + stringValue: 
eade6268-3958-4711-8153-38c42d26d5e0 - key: os.type value: stringValue: linux @@ -1378,16 +1378,16 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-scheduler-kind-control-plane + stringValue: prometheus-annotation-test-cfc77c7b9-hlvbs - key: k8s.pod.uid value: - stringValue: 90194206b3ac749a8da43c61fd4761ca + stringValue: 7c5e6a26-0c5b-4cc8-ade8-45efc11329d9 - key: os.type value: stringValue: linux @@ -1420,26 +1420,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: local-path-storage + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: local-path-provisioner-6f8956fb48-4bmw5 + stringValue: sock-operator-786c6c9777-2flf7 - key: k8s.pod.uid value: - stringValue: 6099916d-c417-4145-b1e8-a68a083a4e36 + stringValue: d4fc43e2-a114-492f-b477-e0936d63dec4 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - isMonotonic: true - - name: k8s.pod.network.io - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "165169" + - asInt: "0" attributes: - key: cluster_name value: @@ -1455,7 +1450,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -1467,21 +1462,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-5c9d8879fd-zqcdv + stringValue: sock-splunk-otel-collector-agent-lhkmv - key: k8s.pod.uid value: - stringValue: dd68fc3a-2f3f-440d-a7e7-618aebbf2090 + stringValue: 4d535893-9ff1-41a0-95cd-704934c3e539 - key: os.type value: stringValue: linux 
timeUnixNano: "1000000" - - asInt: "1961606" + - asInt: "0" attributes: - key: cluster_name value: @@ -1497,7 +1492,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -1509,21 +1504,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-cainjector-6cc9b5f678-zm6qg + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-69d74b99994lz9z - key: k8s.pod.uid value: - stringValue: c17bb001-2b34-4173-993b-47cc1d285f3d + stringValue: 24a76c39-d3a1-48dd-95cd-7f0196a84087 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "314682" + - asInt: "0" attributes: - key: cluster_name value: @@ -1539,7 +1534,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -1551,21 +1546,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-webhook-7bb7b75848-kbgq5 + stringValue: sock-splunk-otel-collector-ta-7f6c9fdf4-kv6zv - key: k8s.pod.uid value: - stringValue: a21b8615-7e13-46d7-b4e8-8ae1e4a5c79f + stringValue: 13d32d4e-5a6c-4f79-9f22-4a538e421a3c - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "1700" + - asInt: "0" attributes: - key: cluster_name value: @@ -1581,7 +1576,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -1593,21 +1588,21 @@ resourceMetrics: stringValue: dev-operator - key: 
k8s.namespace.name value: - stringValue: default + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: java-test-566495f78-6nhnc + stringValue: coredns-5dd5756b68-5rzdc - key: k8s.pod.uid value: - stringValue: fbc84ba5-3b1f-4754-b3f3-76e86eace74e + stringValue: 1e801067-cdf8-4d39-adb7-f04927fcbe4d - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "8109" + - asInt: "0" attributes: - key: cluster_name value: @@ -1623,7 +1618,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -1635,21 +1630,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: default + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: nodejs-test-d454cf769-n4whd + stringValue: coredns-5dd5756b68-n2r77 - key: k8s.pod.uid value: - stringValue: 1f680502-7878-451f-8880-2ca25419ce4d + stringValue: e9eab946-2f83-4950-9331-8d2140b9bb27 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "418247" + - asInt: "0" attributes: - key: cluster_name value: @@ -1665,7 +1660,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -1677,21 +1672,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: default + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-operator-6cf5597fb6-nkvgb + stringValue: etcd-kind-control-plane - key: k8s.pod.uid value: - stringValue: 24d849c7-da2c-4598-a307-9deba65ed5a8 + stringValue: a652c9e3a037b7be2906ed0ac7fea632 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: 
"448608530" + - asInt: "0" attributes: - key: cluster_name value: @@ -1707,7 +1702,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -1719,21 +1714,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: default + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-znqzs + stringValue: kindnet-v8s6x - key: k8s.pod.uid value: - stringValue: 3f2777cd-e7ce-485d-a92c-43c0a2aadee5 + stringValue: b2d71d05-0357-4dc5-9630-50a2fdf3b9c8 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "2628682" + - asInt: "0" attributes: - key: cluster_name value: @@ -1749,7 +1744,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -1761,21 +1756,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: default + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-587576d5842876t + stringValue: kube-apiserver-kind-control-plane - key: k8s.pod.uid value: - stringValue: 73bb8d19-b190-42e9-a77d-c84ef587bd02 + stringValue: 021ea1f4839617eef00a93c39d62616b - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "55831" + - asInt: "0" attributes: - key: cluster_name value: @@ -1791,7 +1786,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -1809,15 +1804,15 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-ttp4s + stringValue: 
kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 8a841abc-5346-4344-81d4-8fc298823892 + stringValue: 78e1474ee2ca64de6cd3283ddf989107 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "56169" + - asInt: "0" attributes: - key: cluster_name value: @@ -1833,7 +1828,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -1851,15 +1846,15 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-x8jcx + stringValue: kube-proxy-zh9sm - key: k8s.pod.uid value: - stringValue: 662cb1aa-7d77-4a28-813a-3abe808e9a04 + stringValue: e049ac1e-d79b-4446-a27a-33db00ab5daf - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "448622310" + - asInt: "0" attributes: - key: cluster_name value: @@ -1875,7 +1870,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -1893,15 +1888,15 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: etcd-kind-control-plane + stringValue: kube-scheduler-kind-control-plane - key: k8s.pod.uid value: - stringValue: 8640ede3763d523342ced9ba7a5fd080 + stringValue: df1f09f3a1f003b8825519a3341f69fe - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "448613705" + - asInt: "0" attributes: - key: cluster_name value: @@ -1917,7 +1912,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -1929,21 +1924,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: local-path-storage - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: 
- stringValue: kindnet-wrk68 + stringValue: local-path-provisioner-6f8956fb48-tjnnh - key: k8s.pod.uid value: - stringValue: e72734dd-e62d-4528-b718-d79e757c0900 + stringValue: 1a0fa2b5-48d1-4246-84f6-614a88fc0723 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "448619227" + - asInt: "0" attributes: - key: cluster_name value: @@ -1959,7 +1954,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -1971,21 +1966,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: ns-w-exclude - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-apiserver-kind-control-plane + stringValue: pod-w-index-w-ns-exclude-44q2b - key: k8s.pod.uid value: - stringValue: 51acb728ea8dc3a01b43334942491036 + stringValue: 7491b268-a198-4dbc-8ed3-3a27a279a732 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "448612472" + - asInt: "0" attributes: - key: cluster_name value: @@ -2001,7 +1996,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -2013,21 +2008,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: ns-w-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-controller-manager-kind-control-plane + stringValue: pod-w-exclude-wo-ns-exclude-q7cb2 - key: k8s.pod.uid value: - stringValue: f98b4cfedb968e3c798bae6ff1faa58d + stringValue: a5fddf35-a751-4dea-b45f-c7558fac023d - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "448600146" + - asInt: "0" attributes: - key: cluster_name value: @@ -2043,7 +2038,7 @@ resourceMetrics: stringValue: dev - 
key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -2055,21 +2050,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: ns-w-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-proxy-zmds8 + stringValue: pod-w-index-w-ns-index-j4lrn - key: k8s.pod.uid value: - stringValue: 3252bcd4-1aa0-4a8d-ad6e-fa3cee960076 + stringValue: 2c4ad2c5-a791-466c-ae50-f8163efc385c - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "448593837" + - asInt: "0" attributes: - key: cluster_name value: @@ -2085,7 +2080,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -2097,21 +2092,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: ns-w-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-scheduler-kind-control-plane + stringValue: pod-wo-index-w-ns-index-7w4s8 - key: k8s.pod.uid value: - stringValue: 90194206b3ac749a8da43c61fd4761ca + stringValue: 173f0280-eafc-4c11-b149-b30baa2e1958 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "8485" + - asInt: "0" attributes: - key: cluster_name value: @@ -2127,7 +2122,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: receive + stringValue: transmit - key: host.name value: stringValue: kind-control-plane @@ -2139,21 +2134,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: local-path-storage + stringValue: ns-wo-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: local-path-provisioner-6f8956fb48-4bmw5 + stringValue: 
pod-w-index-wo-ns-index-c5xdz - key: k8s.pod.uid value: - stringValue: 6099916d-c417-4145-b1e8-a68a083a4e36 + stringValue: 7e87ed20-0fb1-47ad-850e-fd05ff32bf23 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "104298" + - asInt: "0" attributes: - key: cluster_name value: @@ -2181,21 +2176,26 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: ns-wo-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-5c9d8879fd-zqcdv + stringValue: pod-wo-index-wo-ns-index-bp8hd - key: k8s.pod.uid value: - stringValue: dd68fc3a-2f3f-440d-a7e7-618aebbf2090 + stringValue: 2368457e-e367-44b8-90b0-78c751adb967 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "130013" + isMonotonic: true + - name: k8s.pod.network.io + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8191062" attributes: - key: cluster_name value: @@ -2211,7 +2211,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2229,15 +2229,15 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-cainjector-6cc9b5f678-zm6qg + stringValue: cert-manager-67c98b89c8-dtt9m - key: k8s.pod.uid value: - stringValue: c17bb001-2b34-4173-993b-47cc1d285f3d + stringValue: 2a18cace-4258-47cc-8724-68105e50a6b0 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "392751" + - asInt: "13307862" attributes: - key: cluster_name value: @@ -2253,7 +2253,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2271,15 +2271,15 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: 
cert-manager-webhook-7bb7b75848-kbgq5 + stringValue: cert-manager-cainjector-5c5695d979-hz2x4 - key: k8s.pod.uid value: - stringValue: a21b8615-7e13-46d7-b4e8-8ae1e4a5c79f + stringValue: 4d0463e6-e229-4f66-94f8-07e553dbb1bb - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "20081" + - asInt: "4032796" attributes: - key: cluster_name value: @@ -2295,7 +2295,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2307,21 +2307,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: default + stringValue: cert-manager - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: java-test-566495f78-6nhnc + stringValue: cert-manager-webhook-7f9f8648b9-ghhhm - key: k8s.pod.uid value: - stringValue: fbc84ba5-3b1f-4754-b3f3-76e86eace74e + stringValue: 1ebf5a38-5152-430a-80fa-3854278e6a26 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "35283" + - asInt: "3764" attributes: - key: cluster_name value: @@ -2337,7 +2337,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2355,15 +2355,15 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: nodejs-test-d454cf769-n4whd + stringValue: dotnet-test-5479c475fc-6bbpg - key: k8s.pod.uid value: - stringValue: 1f680502-7878-451f-8880-2ca25419ce4d + stringValue: b0a76cb7-8faa-47b3-a0e1-5aeee5544e98 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "48461" + - asInt: "5682" attributes: - key: cluster_name value: @@ -2379,7 +2379,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2397,15 +2397,15 
@@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-operator-6cf5597fb6-nkvgb + stringValue: java-test-5c4d6479b8-sdpd7 - key: k8s.pod.uid value: - stringValue: 24d849c7-da2c-4598-a307-9deba65ed5a8 + stringValue: a45051ae-1c81-47c0-8de0-e71dfe319bb4 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "11089099" + - asInt: "5959" attributes: - key: cluster_name value: @@ -2421,7 +2421,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2439,15 +2439,15 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-znqzs + stringValue: nodejs-test-56b74df9ff-blxvb - key: k8s.pod.uid value: - stringValue: 3f2777cd-e7ce-485d-a92c-43c0a2aadee5 + stringValue: eade6268-3958-4711-8153-38c42d26d5e0 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "1192006" + - asInt: "6267" attributes: - key: cluster_name value: @@ -2463,7 +2463,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2481,15 +2481,15 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-587576d5842876t + stringValue: prometheus-annotation-test-cfc77c7b9-hlvbs - key: k8s.pod.uid value: - stringValue: 73bb8d19-b190-42e9-a77d-c84ef587bd02 + stringValue: 7c5e6a26-0c5b-4cc8-ade8-45efc11329d9 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "45510" + - asInt: "179820" attributes: - key: cluster_name value: @@ -2505,7 +2505,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2517,21 +2517,21 @@ 
resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-ttp4s + stringValue: sock-operator-786c6c9777-2flf7 - key: k8s.pod.uid value: - stringValue: 8a841abc-5346-4344-81d4-8fc298823892 + stringValue: d4fc43e2-a114-492f-b477-e0936d63dec4 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "43608" + - asInt: "514201815" attributes: - key: cluster_name value: @@ -2547,7 +2547,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2559,21 +2559,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-x8jcx + stringValue: sock-splunk-otel-collector-agent-lhkmv - key: k8s.pod.uid value: - stringValue: 662cb1aa-7d77-4a28-813a-3abe808e9a04 + stringValue: 4d535893-9ff1-41a0-95cd-704934c3e539 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "11764064" + - asInt: "2751815" attributes: - key: cluster_name value: @@ -2589,7 +2589,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2601,21 +2601,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: etcd-kind-control-plane + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-69d74b99994lz9z - key: k8s.pod.uid value: - stringValue: 8640ede3763d523342ced9ba7a5fd080 + stringValue: 
24a76c39-d3a1-48dd-95cd-7f0196a84087 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "11525429" + - asInt: "379375" attributes: - key: cluster_name value: @@ -2631,7 +2631,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2643,21 +2643,21 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kindnet-wrk68 + stringValue: sock-splunk-otel-collector-ta-7f6c9fdf4-kv6zv - key: k8s.pod.uid value: - stringValue: e72734dd-e62d-4528-b718-d79e757c0900 + stringValue: 13d32d4e-5a6c-4f79-9f22-4a538e421a3c - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "11658390" + - asInt: "31313862" attributes: - key: cluster_name value: @@ -2673,7 +2673,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2691,15 +2691,15 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-apiserver-kind-control-plane + stringValue: coredns-5dd5756b68-5rzdc - key: k8s.pod.uid value: - stringValue: 51acb728ea8dc3a01b43334942491036 + stringValue: 1e801067-cdf8-4d39-adb7-f04927fcbe4d - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "11520408" + - asInt: "31260066" attributes: - key: cluster_name value: @@ -2715,7 +2715,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2733,15 +2733,15 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-controller-manager-kind-control-plane + stringValue: coredns-5dd5756b68-n2r77 - 
key: k8s.pod.uid value: - stringValue: f98b4cfedb968e3c798bae6ff1faa58d + stringValue: e9eab946-2f83-4950-9331-8d2140b9bb27 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10856516" + - asInt: "514180516" attributes: - key: cluster_name value: @@ -2757,7 +2757,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2775,15 +2775,15 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-proxy-zmds8 + stringValue: etcd-kind-control-plane - key: k8s.pod.uid value: - stringValue: 3252bcd4-1aa0-4a8d-ad6e-fa3cee960076 + stringValue: a652c9e3a037b7be2906ed0ac7fea632 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10786169" + - asInt: "514194479" attributes: - key: cluster_name value: @@ -2799,7 +2799,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2817,15 +2817,15 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-scheduler-kind-control-plane + stringValue: kindnet-v8s6x - key: k8s.pod.uid value: - stringValue: 90194206b3ac749a8da43c61fd4761ca + stringValue: b2d71d05-0357-4dc5-9630-50a2fdf3b9c8 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "4939" + - asInt: "514178644" attributes: - key: cluster_name value: @@ -2841,7 +2841,7 @@ resourceMetrics: stringValue: dev - key: direction value: - stringValue: transmit + stringValue: receive - key: host.name value: stringValue: kind-control-plane @@ -2853,31 +2853,25 @@ resourceMetrics: stringValue: dev-operator - key: k8s.namespace.name value: - stringValue: local-path-storage + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: 
local-path-provisioner-6f8956fb48-4bmw5 + stringValue: kube-apiserver-kind-control-plane - key: k8s.pod.uid value: - stringValue: 6099916d-c417-4145-b1e8-a68a083a4e36 + stringValue: 021ea1f4839617eef00a93c39d62616b - key: os.type value: stringValue: linux timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asInt: "10294927360" + - asInt: "514181628" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - - key: container.id - value: - stringValue: 0200fab216c7ccb291be3d1799f862bd8488cb786b4b1f50abc4e778dc130a3d - key: customfield1 value: stringValue: customvalue1 @@ -2887,15 +2881,18 @@ resourceMetrics: - key: deployment.environment value: stringValue: dev + - key: direction + value: + stringValue: receive - key: host.name value: stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 - key: k8s.cluster.name value: stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: kube-apiserver - key: k8s.namespace.name value: stringValue: kube-system @@ -2904,22 +2901,19 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-apiserver-kind-control-plane + stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 51acb728ea8dc3a01b43334942491036 + stringValue: 78e1474ee2ca64de6cd3283ddf989107 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10294927360" + - asInt: "514202691" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - - key: container.id - value: - stringValue: 021327b7039db9dd913ba2beb4625bf3f2d522805decbeb30d3844953d689895 - key: customfield1 value: stringValue: customvalue1 @@ -2929,39 +2923,39 @@ resourceMetrics: - key: deployment.environment value: stringValue: dev + - key: direction + value: + stringValue: receive - key: host.name value: stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 - key: k8s.cluster.name value: 
stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: otel-collector - key: k8s.namespace.name value: - stringValue: default + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-znqzs + stringValue: kube-proxy-zh9sm - key: k8s.pod.uid value: - stringValue: 3f2777cd-e7ce-485d-a92c-43c0a2aadee5 + stringValue: e049ac1e-d79b-4446-a27a-33db00ab5daf - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10294927360" + - asInt: "514210647" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - - key: container.id - value: - stringValue: 0a1903dcb4d0fdd4279b401318013e772a755df50a54714195cf5b81132ae688 - key: customfield1 value: stringValue: customvalue1 @@ -2971,39 +2965,39 @@ resourceMetrics: - key: deployment.environment value: stringValue: dev + - key: direction + value: + stringValue: receive - key: host.name value: stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 - key: k8s.cluster.name value: stringValue: dev-operator - - key: k8s.container.name - value: - stringValue: manager - key: k8s.namespace.name value: - stringValue: default + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-operator-6cf5597fb6-nkvgb + stringValue: kube-scheduler-kind-control-plane - key: k8s.pod.uid value: - stringValue: 24d849c7-da2c-4598-a307-9deba65ed5a8 + stringValue: df1f09f3a1f003b8825519a3341f69fe - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10294927360" + - asInt: "495994" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - - key: container.id - value: - stringValue: 0b7de24593b9e9b9c4dcb6424f1e829e3232aac96921586aa5b4876e34fffead - key: customfield1 value: stringValue: customvalue1 @@ -3013,7 +3007,3250 @@ resourceMetrics: - key: deployment.environment 
value: stringValue: dev - - key: host.name + - key: direction + value: + stringValue: receive + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: local-path-storage + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: local-path-provisioner-6f8956fb48-tjnnh + - key: k8s.pod.uid + value: + stringValue: 1a0fa2b5-48d1-4246-84f6-614a88fc0723 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "446" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: receive + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-w-exclude + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-w-ns-exclude-44q2b + - key: k8s.pod.uid + value: + stringValue: 7491b268-a198-4dbc-8ed3-3a27a279a732 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "446" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: receive + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - 
key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-exclude-wo-ns-exclude-q7cb2 + - key: k8s.pod.uid + value: + stringValue: a5fddf35-a751-4dea-b45f-c7558fac023d + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "446" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: receive + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-w-ns-index-j4lrn + - key: k8s.pod.uid + value: + stringValue: 2c4ad2c5-a791-466c-ae50-f8163efc385c + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "446" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: receive + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-wo-index-w-ns-index-7w4s8 + - key: k8s.pod.uid + value: + stringValue: 173f0280-eafc-4c11-b149-b30baa2e1958 + - 
key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "446" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: receive + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-wo-ns-index-c5xdz + - key: k8s.pod.uid + value: + stringValue: 7e87ed20-0fb1-47ad-850e-fd05ff32bf23 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "446" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: receive + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-wo-index-wo-ns-index-bp8hd + - key: k8s.pod.uid + value: + stringValue: 2368457e-e367-44b8-90b0-78c751adb967 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "13723565" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - 
key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-67c98b89c8-dtt9m + - key: k8s.pod.uid + value: + stringValue: 2a18cace-4258-47cc-8724-68105e50a6b0 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "3148598" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-cainjector-5c5695d979-hz2x4 + - key: k8s.pod.uid + value: + stringValue: 4d0463e6-e229-4f66-94f8-07e553dbb1bb + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "3846546" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + 
stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-webhook-7f9f8648b9-ghhhm + - key: k8s.pod.uid + value: + stringValue: 1ebf5a38-5152-430a-80fa-3854278e6a26 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "73240" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: dotnet-test-5479c475fc-6bbpg + - key: k8s.pod.uid + value: + stringValue: b0a76cb7-8faa-47b3-a0e1-5aeee5544e98 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "104092" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: java-test-5c4d6479b8-sdpd7 + - key: k8s.pod.uid + value: + stringValue: 
a45051ae-1c81-47c0-8de0-e71dfe319bb4 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "68855" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: nodejs-test-56b74df9ff-blxvb + - key: k8s.pod.uid + value: + stringValue: eade6268-3958-4711-8153-38c42d26d5e0 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "69601" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: prometheus-annotation-test-cfc77c7b9-hlvbs + - key: k8s.pod.uid + value: + stringValue: 7c5e6a26-0c5b-4cc8-ade8-45efc11329d9 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "38187" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: 
customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-786c6c9777-2flf7 + - key: k8s.pod.uid + value: + stringValue: d4fc43e2-a114-492f-b477-e0936d63dec4 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "568479800" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-lhkmv + - key: k8s.pod.uid + value: + stringValue: 4d535893-9ff1-41a0-95cd-704934c3e539 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "2951277" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - 
key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-69d74b99994lz9z + - key: k8s.pod.uid + value: + stringValue: 24a76c39-d3a1-48dd-95cd-7f0196a84087 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "44546" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-ta-7f6c9fdf4-kv6zv + - key: k8s.pod.uid + value: + stringValue: 13d32d4e-5a6c-4f79-9f22-4a538e421a3c + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "60154788" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: 
coredns-5dd5756b68-5rzdc + - key: k8s.pod.uid + value: + stringValue: 1e801067-cdf8-4d39-adb7-f04927fcbe4d + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "60056947" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-n2r77 + - key: k8s.pod.uid + value: + stringValue: e9eab946-2f83-4950-9331-8d2140b9bb27 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "566545673" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: etcd-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: a652c9e3a037b7be2906ed0ac7fea632 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "568014803" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: 
customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kindnet-v8s6x + - key: k8s.pod.uid + value: + stringValue: b2d71d05-0357-4dc5-9630-50a2fdf3b9c8 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "566424250" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-apiserver-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: 021ea1f4839617eef00a93c39d62616b + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "566663960" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: 
interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-controller-manager-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: 78e1474ee2ca64de6cd3283ddf989107 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "568486245" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-proxy-zh9sm + - key: k8s.pod.uid + value: + stringValue: e049ac1e-d79b-4446-a27a-33db00ab5daf + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "569046645" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: 
kube-scheduler-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: df1f09f3a1f003b8825519a3341f69fe + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "224040" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: local-path-storage + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: local-path-provisioner-6f8956fb48-tjnnh + - key: k8s.pod.uid + value: + stringValue: 1a0fa2b5-48d1-4246-84f6-614a88fc0723 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "796" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-w-exclude + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-w-ns-exclude-44q2b + - key: k8s.pod.uid + value: + stringValue: 7491b268-a198-4dbc-8ed3-3a27a279a732 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "796" + attributes: + - key: cluster_name + value: + stringValue: 
ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-exclude-wo-ns-exclude-q7cb2 + - key: k8s.pod.uid + value: + stringValue: a5fddf35-a751-4dea-b45f-c7558fac023d + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "796" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-w-ns-index-j4lrn + - key: k8s.pod.uid + value: + stringValue: 2c4ad2c5-a791-466c-ae50-f8163efc385c + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "796" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + 
stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-wo-index-w-ns-index-7w4s8 + - key: k8s.pod.uid + value: + stringValue: 173f0280-eafc-4c11-b149-b30baa2e1958 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "796" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-wo-ns-index-c5xdz + - key: k8s.pod.uid + value: + stringValue: 7e87ed20-0fb1-47ad-850e-fd05ff32bf23 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "796" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: direction + value: + stringValue: transmit + - key: host.name + value: + stringValue: kind-control-plane + - key: interface + value: + stringValue: eth0 + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: 
k8s.pod.name + value: + stringValue: pod-wo-index-wo-ns-index-bp8hd + - key: k8s.pod.uid + value: + stringValue: 2368457e-e367-44b8-90b0-78c751adb967 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + isMonotonic: true + - gauge: + dataPoints: + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 0fafe483dffee884af267b7155e8d8b3fc0c04d8df26cb1efa7cb108cd352bcf + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: manager + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-786c6c9777-2flf7 + - key: k8s.pod.uid + value: + stringValue: d4fc43e2-a114-492f-b477-e0936d63dec4 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 222c940f309d9bcd66429c5239a6e16654ee8fcf97d67cc888cacddddd099f55 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: targetallocator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-ta-7f6c9fdf4-kv6zv 
+ - key: k8s.pod.uid + value: + stringValue: 13d32d4e-5a6c-4f79-9f22-4a538e421a3c + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 24b3f88060c9edadee5ff09089f3029a1a8b9f4d90db77b0298711bd01e90b72 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-w-index-wo-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-wo-ns-index-c5xdz + - key: k8s.pod.uid + value: + stringValue: 7e87ed20-0fb1-47ad-850e-fd05ff32bf23 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 24befb850c6f1fb6b511631eec70e54a07a9b0f5ee40fb76f19da95f3b3a6c7f + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-wo-index-wo-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-wo-index-wo-ns-index-bp8hd + - key: k8s.pod.uid + value: + stringValue: 2368457e-e367-44b8-90b0-78c751adb967 + - key: 
os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 33f86cf3de649408503e17237017b79e5cc4cd0b58ffb25cc924b4579963638c + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: prometheus-annotation-test + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: prometheus-annotation-test-cfc77c7b9-hlvbs + - key: k8s.pod.uid + value: + stringValue: 7c5e6a26-0c5b-4cc8-ade8-45efc11329d9 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 3f07c1373038d5f7474646dbb567c54ca699439c716bada8bfb0f3a35341890f + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: java-test + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: java-test-5c4d6479b8-sdpd7 + - key: k8s.pod.uid + value: + stringValue: a45051ae-1c81-47c0-8de0-e71dfe319bb4 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - 
key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 46787827ec56c0df1950e55b771be6ebe4028d9d1c15ad752c346798491ca2a9 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-controller-manager + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-controller-manager-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: 78e1474ee2ca64de6cd3283ddf989107 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 4f8457613c97428a381ae6349ea7a9758e1184eea7addb8228ca73e450f01597 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-wo-index-w-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-wo-index-w-ns-index-7w4s8 + - key: k8s.pod.uid + value: + stringValue: 173f0280-eafc-4c11-b149-b30baa2e1958 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + 
stringValue: 5b90948da2af5a7ec45193eaf7e8a763ac5724185a84c41182efc278ee245e1b + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-proxy + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-proxy-zh9sm + - key: k8s.pod.uid + value: + stringValue: e049ac1e-d79b-4446-a27a-33db00ab5daf + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 6a5058048fcacfa65d6419d37f3e1377fd48c0eeb07afdf0f6eaefd8cdbf1d53 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: nodejs-test + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: nodejs-test-56b74df9ff-blxvb + - key: k8s.pod.uid + value: + stringValue: eade6268-3958-4711-8153-38c42d26d5e0 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 6d3225876c70f364fb49189bd9c20dd170fbc49c621b786b2a2c338334c3dd1a + - key: customfield1 + value: + stringValue: customvalue1 + - 
key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-w-index-w-ns-exclude + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-exclude-wo-ns-exclude-q7cb2 + - key: k8s.pod.uid + value: + stringValue: a5fddf35-a751-4dea-b45f-c7558fac023d + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 7f7bdf9904a22d51232483a680516023f4d4f53aa40535d3625e1786c0db784f + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: local-path-provisioner + - key: k8s.namespace.name + value: + stringValue: local-path-storage + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: local-path-provisioner-6f8956fb48-tjnnh + - key: k8s.pod.uid + value: + stringValue: 1a0fa2b5-48d1-4246-84f6-614a88fc0723 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 84b1b7d1397ace112e05e999658b9d64c2e1b70bfd4e01c2728fe8a1ffc8848f + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: 
deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: cert-manager-webhook + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-webhook-7f9f8648b9-ghhhm + - key: k8s.pod.uid + value: + stringValue: 1ebf5a38-5152-430a-80fa-3854278e6a26 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 907b3374be621b2f943096103fa14b993759f1a8b889a792a4906fef2bc21d7d + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: otel-collector + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-lhkmv + - key: k8s.pod.uid + value: + stringValue: 4d535893-9ff1-41a0-95cd-704934c3e539 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 94b6a464867b38cc97d28d9a6ff2101f6db08d9ab5b652fd95f5fb7b56719673 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: 
kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: cert-manager-controller + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-67c98b89c8-dtt9m + - key: k8s.pod.uid + value: + stringValue: 2a18cace-4258-47cc-8724-68105e50a6b0 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 99c2ece6ee80be4814266e49ea7ddcc57b678344658f0f83780156b39d892999 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kindnet-cni + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kindnet-v8s6x + - key: k8s.pod.uid + value: + stringValue: b2d71d05-0357-4dc5-9630-50a2fdf3b9c8 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: aa95d51f5ba3a56291b99a3c3f7d09cdcddaa40b64aeaf3ed078ef71c7192e55 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + 
stringValue: coredns + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-n2r77 + - key: k8s.pod.uid + value: + stringValue: e9eab946-2f83-4950-9331-8d2140b9bb27 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: ac22883ea76623227dff15a22321c7c5306180d143e321cda5b3400952d0e14b + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-apiserver + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-apiserver-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: 021ea1f4839617eef00a93c39d62616b + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: c9af4eaede7b980b1d771af749f2370e6d3a88e89abd198b7d6142a8b41a27e0 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-w-index-w-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + 
value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-w-ns-index-j4lrn + - key: k8s.pod.uid + value: + stringValue: 2c4ad2c5-a791-466c-ae50-f8163efc385c + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: cb1aba654caa6d98519882865244158df6203f599bb35ed6e9e45e71430f7cea + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-rbac-proxy + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-786c6c9777-2flf7 + - key: k8s.pod.uid + value: + stringValue: d4fc43e2-a114-492f-b477-e0936d63dec4 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: d1cba7f3f652854d2dd9c41e7d0398fbc479188abcd88af5b9d9ca68327cf532 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: otel-collector + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: 
sock-splunk-otel-collector-k8s-cluster-receiver-69d74b99994lz9z + - key: k8s.pod.uid + value: + stringValue: 24a76c39-d3a1-48dd-95cd-7f0196a84087 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: dc8158388f0fb8b20fdfa3938940ab3deabb37cb5236eff283068b7349e976ef + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: etcd + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: etcd-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: a652c9e3a037b7be2906ed0ac7fea632 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: df79bbf992582662811e9ddc802cc3fae756ac95e4a7cd4db76419e9e060dabc + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: coredns + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-5rzdc + - key: k8s.pod.uid + value: + stringValue: 1e801067-cdf8-4d39-adb7-f04927fcbe4d + 
- key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: e6d4b6ded54e9e103f3642b9d4a22c8b244462a8dc125ad1df1161e04c6ddeb7 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: dotnet-test + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: dotnet-test-5479c475fc-6bbpg + - key: k8s.pod.uid + value: + stringValue: b0a76cb7-8faa-47b3-a0e1-5aeee5544e98 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: e8e78be19779f3697b37013f8e63b4268e5520ab87a1ff95f6d2232116e1ad4f + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-scheduler + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-scheduler-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: df1f09f3a1f003b8825519a3341f69fe + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: 
cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: e8ead1ee3bb65cb19abe591a137c5124683eef66f2faa499c07893928c3360e6 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-w-index-w-ns-exclude + - key: k8s.namespace.name + value: + stringValue: ns-w-exclude + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-w-ns-exclude-44q2b + - key: k8s.pod.uid + value: + stringValue: 7491b268-a198-4dbc-8ed3-3a27a279a732 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "46293225472" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: ea927afa8bdefb1693209fbbf133edbba8648c99a9b7d6b5fe87a18b5993c910 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: cert-manager-cainjector + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-cainjector-5c5695d979-hz2x4 + - key: k8s.pod.uid + value: + stringValue: 4d0463e6-e229-4f66-94f8-07e553dbb1bb + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + name: container.filesystem.available + - gauge: + dataPoints: + - asInt: "149281173504" + attributes: + - key: cluster_name + 
value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 0fafe483dffee884af267b7155e8d8b3fc0c04d8df26cb1efa7cb108cd352bcf + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: manager + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-operator-786c6c9777-2flf7 + - key: k8s.pod.uid + value: + stringValue: d4fc43e2-a114-492f-b477-e0936d63dec4 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 222c940f309d9bcd66429c5239a6e16654ee8fcf97d67cc888cacddddd099f55 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: targetallocator + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-ta-7f6c9fdf4-kv6zv + - key: k8s.pod.uid + value: + stringValue: 13d32d4e-5a6c-4f79-9f22-4a538e421a3c + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 
24b3f88060c9edadee5ff09089f3029a1a8b9f4d90db77b0298711bd01e90b72 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-w-index-wo-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-wo-ns-index-c5xdz + - key: k8s.pod.uid + value: + stringValue: 7e87ed20-0fb1-47ad-850e-fd05ff32bf23 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 24befb850c6f1fb6b511631eec70e54a07a9b0f5ee40fb76f19da95f3b3a6c7f + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-wo-index-wo-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-wo-index-wo-ns-index-bp8hd + - key: k8s.pod.uid + value: + stringValue: 2368457e-e367-44b8-90b0-78c751adb967 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 33f86cf3de649408503e17237017b79e5cc4cd0b58ffb25cc924b4579963638c + - key: customfield1 + 
value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: prometheus-annotation-test + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: prometheus-annotation-test-cfc77c7b9-hlvbs + - key: k8s.pod.uid + value: + stringValue: 7c5e6a26-0c5b-4cc8-ade8-45efc11329d9 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 3f07c1373038d5f7474646dbb567c54ca699439c716bada8bfb0f3a35341890f + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: java-test + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: java-test-5c4d6479b8-sdpd7 + - key: k8s.pod.uid + value: + stringValue: a45051ae-1c81-47c0-8de0-e71dfe319bb4 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 46787827ec56c0df1950e55b771be6ebe4028d9d1c15ad752c346798491ca2a9 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: 
deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-controller-manager + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-controller-manager-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: 78e1474ee2ca64de6cd3283ddf989107 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 4f8457613c97428a381ae6349ea7a9758e1184eea7addb8228ca73e450f01597 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-wo-index-w-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-wo-index-w-ns-index-7w4s8 + - key: k8s.pod.uid + value: + stringValue: 173f0280-eafc-4c11-b149-b30baa2e1958 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 5b90948da2af5a7ec45193eaf7e8a763ac5724185a84c41182efc278ee245e1b + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + 
stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-proxy + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-proxy-zh9sm + - key: k8s.pod.uid + value: + stringValue: e049ac1e-d79b-4446-a27a-33db00ab5daf + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 6a5058048fcacfa65d6419d37f3e1377fd48c0eeb07afdf0f6eaefd8cdbf1d53 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: nodejs-test + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: nodejs-test-56b74df9ff-blxvb + - key: k8s.pod.uid + value: + stringValue: eade6268-3958-4711-8153-38c42d26d5e0 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 6d3225876c70f364fb49189bd9c20dd170fbc49c621b786b2a2c338334c3dd1a + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + 
stringValue: pod-w-index-w-ns-exclude + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-exclude-wo-ns-exclude-q7cb2 + - key: k8s.pod.uid + value: + stringValue: a5fddf35-a751-4dea-b45f-c7558fac023d + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 7f7bdf9904a22d51232483a680516023f4d4f53aa40535d3625e1786c0db784f + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: local-path-provisioner + - key: k8s.namespace.name + value: + stringValue: local-path-storage + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: local-path-provisioner-6f8956fb48-tjnnh + - key: k8s.pod.uid + value: + stringValue: 1a0fa2b5-48d1-4246-84f6-614a88fc0723 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 84b1b7d1397ace112e05e999658b9d64c2e1b70bfd4e01c2728fe8a1ffc8848f + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: cert-manager-webhook + - key: k8s.namespace.name + value: + 
stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-webhook-7f9f8648b9-ghhhm + - key: k8s.pod.uid + value: + stringValue: 1ebf5a38-5152-430a-80fa-3854278e6a26 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 907b3374be621b2f943096103fa14b993759f1a8b889a792a4906fef2bc21d7d + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: otel-collector + - key: k8s.namespace.name + value: + stringValue: default + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: sock-splunk-otel-collector-agent-lhkmv + - key: k8s.pod.uid + value: + stringValue: 4d535893-9ff1-41a0-95cd-704934c3e539 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 94b6a464867b38cc97d28d9a6ff2101f6db08d9ab5b652fd95f5fb7b56719673 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: cert-manager-controller + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: 
kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-67c98b89c8-dtt9m + - key: k8s.pod.uid + value: + stringValue: 2a18cace-4258-47cc-8724-68105e50a6b0 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 99c2ece6ee80be4814266e49ea7ddcc57b678344658f0f83780156b39d892999 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kindnet-cni + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kindnet-v8s6x + - key: k8s.pod.uid + value: + stringValue: b2d71d05-0357-4dc5-9630-50a2fdf3b9c8 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: aa95d51f5ba3a56291b99a3c3f7d09cdcddaa40b64aeaf3ed078ef71c7192e55 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: coredns + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: coredns-5dd5756b68-n2r77 + - key: k8s.pod.uid + value: + stringValue: 
e9eab946-2f83-4950-9331-8d2140b9bb27 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: ac22883ea76623227dff15a22321c7c5306180d143e321cda5b3400952d0e14b + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name value: stringValue: kind-control-plane - key: k8s.cluster.name @@ -3021,7 +6258,91 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: nodejs-test + stringValue: kube-apiserver + - key: k8s.namespace.name + value: + stringValue: kube-system + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: kube-apiserver-kind-control-plane + - key: k8s.pod.uid + value: + stringValue: 021ea1f4839617eef00a93c39d62616b + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: c9af4eaede7b980b1d771af749f2370e6d3a88e89abd198b7d6142a8b41a27e0 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: pod-w-index-w-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-w-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-w-index-w-ns-index-j4lrn + - key: k8s.pod.uid + value: + stringValue: 2c4ad2c5-a791-466c-ae50-f8163efc385c + - key: os.type + 
value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "149281173504" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: cb1aba654caa6d98519882865244158df6203f599bb35ed6e9e45e71430f7cea + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kube-rbac-proxy - key: k8s.namespace.name value: stringValue: default @@ -3030,22 +6351,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: nodejs-test-d454cf769-n4whd + stringValue: sock-operator-786c6c9777-2flf7 - key: k8s.pod.uid value: - stringValue: 1f680502-7878-451f-8880-2ca25419ce4d + stringValue: d4fc43e2-a114-492f-b477-e0936d63dec4 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10294927360" + - asInt: "149281173504" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0f663224843198b71d0acbd6738008173478ad97f79ed2411ba7a3d6014f69f4 + stringValue: d1cba7f3f652854d2dd9c41e7d0398fbc479188abcd88af5b9d9ca68327cf532 - key: customfield1 value: stringValue: customvalue1 @@ -3063,31 +6384,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: cert-manager-cainjector + stringValue: otel-collector - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-cainjector-6cc9b5f678-zm6qg + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-69d74b99994lz9z - key: k8s.pod.uid value: - stringValue: c17bb001-2b34-4173-993b-47cc1d285f3d + 
stringValue: 24a76c39-d3a1-48dd-95cd-7f0196a84087 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10294927360" + - asInt: "149281173504" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 2118c058c65a24bcf948d3b082c7d24a957ee636d8bfc9a31dec1c700912daf2 + stringValue: dc8158388f0fb8b20fdfa3938940ab3deabb37cb5236eff283068b7349e976ef - key: customfield1 value: stringValue: customvalue1 @@ -3105,7 +6426,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: coredns + stringValue: etcd - key: k8s.namespace.name value: stringValue: kube-system @@ -3114,22 +6435,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-ttp4s + stringValue: etcd-kind-control-plane - key: k8s.pod.uid value: - stringValue: 8a841abc-5346-4344-81d4-8fc298823892 + stringValue: a652c9e3a037b7be2906ed0ac7fea632 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10294927360" + - asInt: "149281173504" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 2188e5dbd44970ada6bb9b4adc1190534cec2034ed4bbbefe39b998b762c8090 + stringValue: df79bbf992582662811e9ddc802cc3fae756ac95e4a7cd4db76419e9e060dabc - key: customfield1 value: stringValue: customvalue1 @@ -3147,7 +6468,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-controller-manager + stringValue: coredns - key: k8s.namespace.name value: stringValue: kube-system @@ -3156,22 +6477,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-controller-manager-kind-control-plane + stringValue: coredns-5dd5756b68-5rzdc - key: k8s.pod.uid value: - stringValue: f98b4cfedb968e3c798bae6ff1faa58d + stringValue: 1e801067-cdf8-4d39-adb7-f04927fcbe4d - key: os.type value: stringValue: linux timeUnixNano: 
"1000000" - - asInt: "10294927360" + - asInt: "149281173504" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 23c653fde53fcb3b3dd7f586ed165f5621f786f1bd68f9ed18caaa132177a05e + stringValue: e6d4b6ded54e9e103f3642b9d4a22c8b244462a8dc125ad1df1161e04c6ddeb7 - key: customfield1 value: stringValue: customvalue1 @@ -3189,7 +6510,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: otel-collector + stringValue: dotnet-test - key: k8s.namespace.name value: stringValue: default @@ -3198,22 +6519,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-587576d5842876t + stringValue: dotnet-test-5479c475fc-6bbpg - key: k8s.pod.uid value: - stringValue: 73bb8d19-b190-42e9-a77d-c84ef587bd02 + stringValue: b0a76cb7-8faa-47b3-a0e1-5aeee5544e98 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10294927360" + - asInt: "149281173504" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 3c4c69e6395265782987df7e1a9ad2ce992b5fdce80124c6608594c54d069b4a + stringValue: e8e78be19779f3697b37013f8e63b4268e5520ab87a1ff95f6d2232116e1ad4f - key: customfield1 value: stringValue: customvalue1 @@ -3231,31 +6552,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: local-path-provisioner + stringValue: kube-scheduler - key: k8s.namespace.name value: - stringValue: local-path-storage + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: local-path-provisioner-6f8956fb48-4bmw5 + stringValue: kube-scheduler-kind-control-plane - key: k8s.pod.uid value: - stringValue: 6099916d-c417-4145-b1e8-a68a083a4e36 + stringValue: df1f09f3a1f003b8825519a3341f69fe - key: os.type value: stringValue: linux timeUnixNano: "1000000" - 
- asInt: "10294927360" + - asInt: "149281173504" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 450386c43051a8d554ba7b41fb6fd35740fad2ee77f6a5be5f7d7b366934fa37 + stringValue: e8ead1ee3bb65cb19abe591a137c5124683eef66f2faa499c07893928c3360e6 - key: customfield1 value: stringValue: customvalue1 @@ -3273,31 +6594,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: etcd + stringValue: pod-w-index-w-ns-exclude - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: ns-w-exclude - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: etcd-kind-control-plane + stringValue: pod-w-index-w-ns-exclude-44q2b - key: k8s.pod.uid value: - stringValue: 8640ede3763d523342ced9ba7a5fd080 + stringValue: 7491b268-a198-4dbc-8ed3-3a27a279a732 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10294927360" + - asInt: "149281173504" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 47a8c0881856e8eebf6dead289f5605bf7e6e159131148313ff44b43f93792ab + stringValue: ea927afa8bdefb1693209fbbf133edbba8648c99a9b7d6b5fe87a18b5993c910 - key: customfield1 value: stringValue: customvalue1 @@ -3315,7 +6636,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: cert-manager-controller + stringValue: cert-manager-cainjector - key: k8s.namespace.name value: stringValue: cert-manager @@ -3324,22 +6645,25 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-5c9d8879fd-zqcdv + stringValue: cert-manager-cainjector-5c5695d979-hz2x4 - key: k8s.pod.uid value: - stringValue: dd68fc3a-2f3f-440d-a7e7-618aebbf2090 + stringValue: 4d0463e6-e229-4f66-94f8-07e553dbb1bb - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10294927360" + name: 
container.filesystem.capacity + - gauge: + dataPoints: + - asInt: "57344" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 49285c01fa935e13e79252d57fb188b1c72b317328df453f855fd7307bf6166a + stringValue: 0fafe483dffee884af267b7155e8d8b3fc0c04d8df26cb1efa7cb108cd352bcf - key: customfield1 value: stringValue: customvalue1 @@ -3357,31 +6681,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: coredns + stringValue: manager - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-x8jcx + stringValue: sock-operator-786c6c9777-2flf7 - key: k8s.pod.uid value: - stringValue: 662cb1aa-7d77-4a28-813a-3abe808e9a04 + stringValue: d4fc43e2-a114-492f-b477-e0936d63dec4 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10294927360" + - asInt: "49152" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 4bd3dfa8576441902eb944836f6752aeb3d5fbd24b17e3984e9f88b205096130 + stringValue: 222c940f309d9bcd66429c5239a6e16654ee8fcf97d67cc888cacddddd099f55 - key: customfield1 value: stringValue: customvalue1 @@ -3399,31 +6723,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-proxy + stringValue: targetallocator - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-proxy-zmds8 + stringValue: sock-splunk-otel-collector-ta-7f6c9fdf4-kv6zv - key: k8s.pod.uid value: - stringValue: 3252bcd4-1aa0-4a8d-ad6e-fa3cee960076 + stringValue: 13d32d4e-5a6c-4f79-9f22-4a538e421a3c - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10294927360" + - asInt: "323584" 
attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 63c80e0ba2de4a1f83678a6dd53d96b507a07a8a18a45b9819e10498c9d851f0 + stringValue: 24b3f88060c9edadee5ff09089f3029a1a8b9f4d90db77b0298711bd01e90b72 - key: customfield1 value: stringValue: customvalue1 @@ -3441,31 +6765,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: java-test + stringValue: pod-w-index-wo-ns-index - key: k8s.namespace.name value: - stringValue: default + stringValue: ns-wo-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: java-test-566495f78-6nhnc + stringValue: pod-w-index-wo-ns-index-c5xdz - key: k8s.pod.uid value: - stringValue: fbc84ba5-3b1f-4754-b3f3-76e86eace74e + stringValue: 7e87ed20-0fb1-47ad-850e-fd05ff32bf23 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10294927360" + - asInt: "323584" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 9eec542c85daa232c5488c4f587671a6cf01f52ccddf663abd74b8b2f158e962 + stringValue: 24befb850c6f1fb6b511631eec70e54a07a9b0f5ee40fb76f19da95f3b3a6c7f - key: customfield1 value: stringValue: customvalue1 @@ -3483,7 +6807,49 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-rbac-proxy + stringValue: pod-wo-index-wo-ns-index + - key: k8s.namespace.name + value: + stringValue: ns-wo-index + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: pod-wo-index-wo-ns-index-bp8hd + - key: k8s.pod.uid + value: + stringValue: 2368457e-e367-44b8-90b0-78c751adb967 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "24576" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 
33f86cf3de649408503e17237017b79e5cc4cd0b58ffb25cc924b4579963638c + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: prometheus-annotation-test - key: k8s.namespace.name value: stringValue: default @@ -3492,22 +6858,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-operator-6cf5597fb6-nkvgb + stringValue: prometheus-annotation-test-cfc77c7b9-hlvbs - key: k8s.pod.uid value: - stringValue: 24d849c7-da2c-4598-a307-9deba65ed5a8 + stringValue: 7c5e6a26-0c5b-4cc8-ade8-45efc11329d9 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10294927360" + - asInt: "217088" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: cdf2eb0fde3a64bdcb49a31753d8030ee0d3c9d55e27099c582ed524e7fa58e3 + stringValue: 3f07c1373038d5f7474646dbb567c54ca699439c716bada8bfb0f3a35341890f - key: customfield1 value: stringValue: customvalue1 @@ -3525,31 +6891,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: cert-manager-webhook + stringValue: java-test - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-webhook-7bb7b75848-kbgq5 + stringValue: java-test-5c4d6479b8-sdpd7 - key: k8s.pod.uid value: - stringValue: a21b8615-7e13-46d7-b4e8-8ae1e4a5c79f + stringValue: a45051ae-1c81-47c0-8de0-e71dfe319bb4 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10294927360" + - asInt: "73728" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: 
container.id value: - stringValue: dd4fd2423e56794193a5ccb18b4fd39dbf27ed97eacb1f282e92d108bc0bb2e9 + stringValue: 46787827ec56c0df1950e55b771be6ebe4028d9d1c15ad752c346798491ca2a9 - key: customfield1 value: stringValue: customvalue1 @@ -3567,7 +6933,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-scheduler + stringValue: kube-controller-manager - key: k8s.namespace.name value: stringValue: kube-system @@ -3576,22 +6942,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-scheduler-kind-control-plane + stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 90194206b3ac749a8da43c61fd4761ca + stringValue: 78e1474ee2ca64de6cd3283ddf989107 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10294927360" + - asInt: "323584" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: f42485d9874a9b0458270281ba2b5c76679bf622b30c92c54b696a5a81b5d7c0 + stringValue: 4f8457613c97428a381ae6349ea7a9758e1184eea7addb8228ca73e450f01597 - key: customfield1 value: stringValue: customvalue1 @@ -3609,34 +6975,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kindnet-cni + stringValue: pod-wo-index-w-ns-index - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: ns-w-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kindnet-wrk68 + stringValue: pod-wo-index-w-ns-index-7w4s8 - key: k8s.pod.uid value: - stringValue: e72734dd-e62d-4528-b718-d79e757c0900 + stringValue: 173f0280-eafc-4c11-b149-b30baa2e1958 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - name: container.filesystem.available - - gauge: - dataPoints: - - asInt: "125658222592" + - asInt: "69632" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: 
container.id value: - stringValue: 0200fab216c7ccb291be3d1799f862bd8488cb786b4b1f50abc4e778dc130a3d + stringValue: 5b90948da2af5a7ec45193eaf7e8a763ac5724185a84c41182efc278ee245e1b - key: customfield1 value: stringValue: customvalue1 @@ -3654,7 +7017,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-apiserver + stringValue: kube-proxy - key: k8s.namespace.name value: stringValue: kube-system @@ -3663,22 +7026,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-apiserver-kind-control-plane + stringValue: kube-proxy-zh9sm - key: k8s.pod.uid value: - stringValue: 51acb728ea8dc3a01b43334942491036 + stringValue: e049ac1e-d79b-4446-a27a-33db00ab5daf - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "16384" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 021327b7039db9dd913ba2beb4625bf3f2d522805decbeb30d3844953d689895 + stringValue: 6a5058048fcacfa65d6419d37f3e1377fd48c0eeb07afdf0f6eaefd8cdbf1d53 - key: customfield1 value: stringValue: customvalue1 @@ -3696,7 +7059,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: otel-collector + stringValue: nodejs-test - key: k8s.namespace.name value: stringValue: default @@ -3705,22 +7068,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-znqzs + stringValue: nodejs-test-56b74df9ff-blxvb - key: k8s.pod.uid value: - stringValue: 3f2777cd-e7ce-485d-a92c-43c0a2aadee5 + stringValue: eade6268-3958-4711-8153-38c42d26d5e0 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "323584" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0a1903dcb4d0fdd4279b401318013e772a755df50a54714195cf5b81132ae688 + 
stringValue: 6d3225876c70f364fb49189bd9c20dd170fbc49c621b786b2a2c338334c3dd1a - key: customfield1 value: stringValue: customvalue1 @@ -3738,31 +7101,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: manager + stringValue: pod-w-index-w-ns-exclude - key: k8s.namespace.name value: - stringValue: default + stringValue: ns-w-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-operator-6cf5597fb6-nkvgb + stringValue: pod-w-exclude-wo-ns-exclude-q7cb2 - key: k8s.pod.uid value: - stringValue: 24d849c7-da2c-4598-a307-9deba65ed5a8 + stringValue: a5fddf35-a751-4dea-b45f-c7558fac023d - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "40960" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0b7de24593b9e9b9c4dcb6424f1e829e3232aac96921586aa5b4876e34fffead + stringValue: 7f7bdf9904a22d51232483a680516023f4d4f53aa40535d3625e1786c0db784f - key: customfield1 value: stringValue: customvalue1 @@ -3780,31 +7143,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: nodejs-test + stringValue: local-path-provisioner - key: k8s.namespace.name value: - stringValue: default + stringValue: local-path-storage - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: nodejs-test-d454cf769-n4whd + stringValue: local-path-provisioner-6f8956fb48-tjnnh - key: k8s.pod.uid value: - stringValue: 1f680502-7878-451f-8880-2ca25419ce4d + stringValue: 1a0fa2b5-48d1-4246-84f6-614a88fc0723 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "36864" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0f663224843198b71d0acbd6738008173478ad97f79ed2411ba7a3d6014f69f4 + stringValue: 
84b1b7d1397ace112e05e999658b9d64c2e1b70bfd4e01c2728fe8a1ffc8848f - key: customfield1 value: stringValue: customvalue1 @@ -3822,7 +7185,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: cert-manager-cainjector + stringValue: cert-manager-webhook - key: k8s.namespace.name value: stringValue: cert-manager @@ -3831,22 +7194,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-cainjector-6cc9b5f678-zm6qg + stringValue: cert-manager-webhook-7f9f8648b9-ghhhm - key: k8s.pod.uid value: - stringValue: c17bb001-2b34-4173-993b-47cc1d285f3d + stringValue: 1ebf5a38-5152-430a-80fa-3854278e6a26 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "143360" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 2118c058c65a24bcf948d3b082c7d24a957ee636d8bfc9a31dec1c700912daf2 + stringValue: 907b3374be621b2f943096103fa14b993759f1a8b889a792a4906fef2bc21d7d - key: customfield1 value: stringValue: customvalue1 @@ -3864,31 +7227,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: coredns + stringValue: otel-collector - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-ttp4s + stringValue: sock-splunk-otel-collector-agent-lhkmv - key: k8s.pod.uid value: - stringValue: 8a841abc-5346-4344-81d4-8fc298823892 + stringValue: 4d535893-9ff1-41a0-95cd-704934c3e539 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "36864" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 2188e5dbd44970ada6bb9b4adc1190534cec2034ed4bbbefe39b998b762c8090 + stringValue: 
94b6a464867b38cc97d28d9a6ff2101f6db08d9ab5b652fd95f5fb7b56719673 - key: customfield1 value: stringValue: customvalue1 @@ -3906,7 +7269,49 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-controller-manager + stringValue: cert-manager-controller + - key: k8s.namespace.name + value: + stringValue: cert-manager + - key: k8s.node.name + value: + stringValue: kind-control-plane + - key: k8s.pod.name + value: + stringValue: cert-manager-67c98b89c8-dtt9m + - key: k8s.pod.uid + value: + stringValue: 2a18cace-4258-47cc-8724-68105e50a6b0 + - key: os.type + value: + stringValue: linux + timeUnixNano: "1000000" + - asInt: "69632" + attributes: + - key: cluster_name + value: + stringValue: ci-k8s-cluster + - key: container.id + value: + stringValue: 99c2ece6ee80be4814266e49ea7ddcc57b678344658f0f83780156b39d892999 + - key: customfield1 + value: + stringValue: customvalue1 + - key: customfield2 + value: + stringValue: customvalue2 + - key: deployment.environment + value: + stringValue: dev + - key: host.name + value: + stringValue: kind-control-plane + - key: k8s.cluster.name + value: + stringValue: dev-operator + - key: k8s.container.name + value: + stringValue: kindnet-cni - key: k8s.namespace.name value: stringValue: kube-system @@ -3915,22 +7320,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-controller-manager-kind-control-plane + stringValue: kindnet-v8s6x - key: k8s.pod.uid value: - stringValue: f98b4cfedb968e3c798bae6ff1faa58d + stringValue: b2d71d05-0357-4dc5-9630-50a2fdf3b9c8 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "49152" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 23c653fde53fcb3b3dd7f586ed165f5621f786f1bd68f9ed18caaa132177a05e + stringValue: aa95d51f5ba3a56291b99a3c3f7d09cdcddaa40b64aeaf3ed078ef71c7192e55 - key: customfield1 value: 
stringValue: customvalue1 @@ -3948,31 +7353,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: otel-collector + stringValue: coredns - key: k8s.namespace.name value: - stringValue: default + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-587576d5842876t + stringValue: coredns-5dd5756b68-n2r77 - key: k8s.pod.uid value: - stringValue: 73bb8d19-b190-42e9-a77d-c84ef587bd02 + stringValue: e9eab946-2f83-4950-9331-8d2140b9bb27 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "53248" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 3c4c69e6395265782987df7e1a9ad2ce992b5fdce80124c6608594c54d069b4a + stringValue: ac22883ea76623227dff15a22321c7c5306180d143e321cda5b3400952d0e14b - key: customfield1 value: stringValue: customvalue1 @@ -3990,31 +7395,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: local-path-provisioner + stringValue: kube-apiserver - key: k8s.namespace.name value: - stringValue: local-path-storage + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: local-path-provisioner-6f8956fb48-4bmw5 + stringValue: kube-apiserver-kind-control-plane - key: k8s.pod.uid value: - stringValue: 6099916d-c417-4145-b1e8-a68a083a4e36 + stringValue: 021ea1f4839617eef00a93c39d62616b - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "323584" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 450386c43051a8d554ba7b41fb6fd35740fad2ee77f6a5be5f7d7b366934fa37 + stringValue: c9af4eaede7b980b1d771af749f2370e6d3a88e89abd198b7d6142a8b41a27e0 - key: customfield1 value: stringValue: 
customvalue1 @@ -4032,31 +7437,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: etcd + stringValue: pod-w-index-w-ns-index - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: ns-w-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: etcd-kind-control-plane + stringValue: pod-w-index-w-ns-index-j4lrn - key: k8s.pod.uid value: - stringValue: 8640ede3763d523342ced9ba7a5fd080 + stringValue: 2c4ad2c5-a791-466c-ae50-f8163efc385c - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "36864" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 47a8c0881856e8eebf6dead289f5605bf7e6e159131148313ff44b43f93792ab + stringValue: cb1aba654caa6d98519882865244158df6203f599bb35ed6e9e45e71430f7cea - key: customfield1 value: stringValue: customvalue1 @@ -4074,31 +7479,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: cert-manager-controller + stringValue: kube-rbac-proxy - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-5c9d8879fd-zqcdv + stringValue: sock-operator-786c6c9777-2flf7 - key: k8s.pod.uid value: - stringValue: dd68fc3a-2f3f-440d-a7e7-618aebbf2090 + stringValue: d4fc43e2-a114-492f-b477-e0936d63dec4 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "49152" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 49285c01fa935e13e79252d57fb188b1c72b317328df453f855fd7307bf6166a + stringValue: d1cba7f3f652854d2dd9c41e7d0398fbc479188abcd88af5b9d9ca68327cf532 - key: customfield1 value: stringValue: customvalue1 @@ -4116,31 +7521,31 @@ resourceMetrics: 
stringValue: dev-operator - key: k8s.container.name value: - stringValue: coredns + stringValue: otel-collector - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-x8jcx + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-69d74b99994lz9z - key: k8s.pod.uid value: - stringValue: 662cb1aa-7d77-4a28-813a-3abe808e9a04 + stringValue: 24a76c39-d3a1-48dd-95cd-7f0196a84087 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "40960" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 4bd3dfa8576441902eb944836f6752aeb3d5fbd24b17e3984e9f88b205096130 + stringValue: dc8158388f0fb8b20fdfa3938940ab3deabb37cb5236eff283068b7349e976ef - key: customfield1 value: stringValue: customvalue1 @@ -4158,7 +7563,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-proxy + stringValue: etcd - key: k8s.namespace.name value: stringValue: kube-system @@ -4167,22 +7572,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-proxy-zmds8 + stringValue: etcd-kind-control-plane - key: k8s.pod.uid value: - stringValue: 3252bcd4-1aa0-4a8d-ad6e-fa3cee960076 + stringValue: a652c9e3a037b7be2906ed0ac7fea632 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "49152" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 63c80e0ba2de4a1f83678a6dd53d96b507a07a8a18a45b9819e10498c9d851f0 + stringValue: df79bbf992582662811e9ddc802cc3fae756ac95e4a7cd4db76419e9e060dabc - key: customfield1 value: stringValue: customvalue1 @@ -4200,31 +7605,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: java-test + 
stringValue: coredns - key: k8s.namespace.name value: - stringValue: default + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: java-test-566495f78-6nhnc + stringValue: coredns-5dd5756b68-5rzdc - key: k8s.pod.uid value: - stringValue: fbc84ba5-3b1f-4754-b3f3-76e86eace74e + stringValue: 1e801067-cdf8-4d39-adb7-f04927fcbe4d - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "69632" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 9eec542c85daa232c5488c4f587671a6cf01f52ccddf663abd74b8b2f158e962 + stringValue: e6d4b6ded54e9e103f3642b9d4a22c8b244462a8dc125ad1df1161e04c6ddeb7 - key: customfield1 value: stringValue: customvalue1 @@ -4242,7 +7647,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-rbac-proxy + stringValue: dotnet-test - key: k8s.namespace.name value: stringValue: default @@ -4251,22 +7656,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-operator-6cf5597fb6-nkvgb + stringValue: dotnet-test-5479c475fc-6bbpg - key: k8s.pod.uid value: - stringValue: 24d849c7-da2c-4598-a307-9deba65ed5a8 + stringValue: b0a76cb7-8faa-47b3-a0e1-5aeee5544e98 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "16384" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: cdf2eb0fde3a64bdcb49a31753d8030ee0d3c9d55e27099c582ed524e7fa58e3 + stringValue: e8e78be19779f3697b37013f8e63b4268e5520ab87a1ff95f6d2232116e1ad4f - key: customfield1 value: stringValue: customvalue1 @@ -4284,31 +7689,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: cert-manager-webhook + stringValue: kube-scheduler - key: k8s.namespace.name value: - stringValue: cert-manager + 
stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-webhook-7bb7b75848-kbgq5 + stringValue: kube-scheduler-kind-control-plane - key: k8s.pod.uid value: - stringValue: a21b8615-7e13-46d7-b4e8-8ae1e4a5c79f + stringValue: df1f09f3a1f003b8825519a3341f69fe - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "323584" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: dd4fd2423e56794193a5ccb18b4fd39dbf27ed97eacb1f282e92d108bc0bb2e9 + stringValue: e8ead1ee3bb65cb19abe591a137c5124683eef66f2faa499c07893928c3360e6 - key: customfield1 value: stringValue: customvalue1 @@ -4326,31 +7731,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-scheduler + stringValue: pod-w-index-w-ns-exclude - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: ns-w-exclude - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-scheduler-kind-control-plane + stringValue: pod-w-index-w-ns-exclude-44q2b - key: k8s.pod.uid value: - stringValue: 90194206b3ac749a8da43c61fd4761ca + stringValue: 7491b268-a198-4dbc-8ed3-3a27a279a732 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "125658222592" + - asInt: "36864" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: f42485d9874a9b0458270281ba2b5c76679bf622b30c92c54b696a5a81b5d7c0 + stringValue: ea927afa8bdefb1693209fbbf133edbba8648c99a9b7d6b5fe87a18b5993c910 - key: customfield1 value: stringValue: customvalue1 @@ -4368,34 +7773,34 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kindnet-cni + stringValue: cert-manager-cainjector - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: cert-manager - 
key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kindnet-wrk68 + stringValue: cert-manager-cainjector-5c5695d979-hz2x4 - key: k8s.pod.uid value: - stringValue: e72734dd-e62d-4528-b718-d79e757c0900 + stringValue: 4d0463e6-e229-4f66-94f8-07e553dbb1bb - key: os.type value: stringValue: linux timeUnixNano: "1000000" - name: container.filesystem.capacity + name: container.filesystem.usage - gauge: dataPoints: - - asInt: "53248" + - asInt: "20213760" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0200fab216c7ccb291be3d1799f862bd8488cb786b4b1f50abc4e778dc130a3d + stringValue: 0fafe483dffee884af267b7155e8d8b3fc0c04d8df26cb1efa7cb108cd352bcf - key: customfield1 value: stringValue: customvalue1 @@ -4413,31 +7818,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-apiserver + stringValue: manager - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-apiserver-kind-control-plane + stringValue: sock-operator-786c6c9777-2flf7 - key: k8s.pod.uid value: - stringValue: 51acb728ea8dc3a01b43334942491036 + stringValue: d4fc43e2-a114-492f-b477-e0936d63dec4 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "143360" + - asInt: "24891392" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 021327b7039db9dd913ba2beb4625bf3f2d522805decbeb30d3844953d689895 + stringValue: 222c940f309d9bcd66429c5239a6e16654ee8fcf97d67cc888cacddddd099f55 - key: customfield1 value: stringValue: customvalue1 @@ -4455,7 +7860,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: otel-collector + stringValue: targetallocator - key: k8s.namespace.name value: stringValue: default @@ -4464,22 +7869,22 
@@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-znqzs + stringValue: sock-splunk-otel-collector-ta-7f6c9fdf4-kv6zv - key: k8s.pod.uid value: - stringValue: 3f2777cd-e7ce-485d-a92c-43c0a2aadee5 + stringValue: 13d32d4e-5a6c-4f79-9f22-4a538e421a3c - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "57344" + - asInt: "10686464" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0a1903dcb4d0fdd4279b401318013e772a755df50a54714195cf5b81132ae688 + stringValue: 24b3f88060c9edadee5ff09089f3029a1a8b9f4d90db77b0298711bd01e90b72 - key: customfield1 value: stringValue: customvalue1 @@ -4497,31 +7902,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: manager + stringValue: pod-w-index-wo-ns-index - key: k8s.namespace.name value: - stringValue: default + stringValue: ns-wo-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-operator-6cf5597fb6-nkvgb + stringValue: pod-w-index-wo-ns-index-c5xdz - key: k8s.pod.uid value: - stringValue: 24d849c7-da2c-4598-a307-9deba65ed5a8 + stringValue: 7e87ed20-0fb1-47ad-850e-fd05ff32bf23 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "32768" + - asInt: "10682368" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0b7de24593b9e9b9c4dcb6424f1e829e3232aac96921586aa5b4876e34fffead + stringValue: 24befb850c6f1fb6b511631eec70e54a07a9b0f5ee40fb76f19da95f3b3a6c7f - key: customfield1 value: stringValue: customvalue1 @@ -4539,31 +7944,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: nodejs-test + stringValue: pod-wo-index-wo-ns-index - key: k8s.namespace.name value: - stringValue: default + stringValue: ns-wo-index - key: k8s.node.name value: stringValue: 
kind-control-plane - key: k8s.pod.name value: - stringValue: nodejs-test-d454cf769-n4whd + stringValue: pod-wo-index-wo-ns-index-bp8hd - key: k8s.pod.uid value: - stringValue: 1f680502-7878-451f-8880-2ca25419ce4d + stringValue: 2368457e-e367-44b8-90b0-78c751adb967 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "36864" + - asInt: "171048960" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0f663224843198b71d0acbd6738008173478ad97f79ed2411ba7a3d6014f69f4 + stringValue: 33f86cf3de649408503e17237017b79e5cc4cd0b58ffb25cc924b4579963638c - key: customfield1 value: stringValue: customvalue1 @@ -4581,31 +7986,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: cert-manager-cainjector + stringValue: prometheus-annotation-test - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-cainjector-6cc9b5f678-zm6qg + stringValue: prometheus-annotation-test-cfc77c7b9-hlvbs - key: k8s.pod.uid value: - stringValue: c17bb001-2b34-4173-993b-47cc1d285f3d + stringValue: 7c5e6a26-0c5b-4cc8-ade8-45efc11329d9 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "49152" + - asInt: "285708288" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 2118c058c65a24bcf948d3b082c7d24a957ee636d8bfc9a31dec1c700912daf2 + stringValue: 3f07c1373038d5f7474646dbb567c54ca699439c716bada8bfb0f3a35341890f - key: customfield1 value: stringValue: customvalue1 @@ -4623,31 +8028,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: coredns + stringValue: java-test - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name 
value: - stringValue: coredns-5dd5756b68-ttp4s + stringValue: java-test-5c4d6479b8-sdpd7 - key: k8s.pod.uid value: - stringValue: 8a841abc-5346-4344-81d4-8fc298823892 + stringValue: a45051ae-1c81-47c0-8de0-e71dfe319bb4 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "73728" + - asInt: "74686464" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 2188e5dbd44970ada6bb9b4adc1190534cec2034ed4bbbefe39b998b762c8090 + stringValue: 46787827ec56c0df1950e55b771be6ebe4028d9d1c15ad752c346798491ca2a9 - key: customfield1 value: stringValue: customvalue1 @@ -4677,19 +8082,19 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: f98b4cfedb968e3c798bae6ff1faa58d + stringValue: 78e1474ee2ca64de6cd3283ddf989107 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "49152" + - asInt: "10686464" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 23c653fde53fcb3b3dd7f586ed165f5621f786f1bd68f9ed18caaa132177a05e + stringValue: 4f8457613c97428a381ae6349ea7a9758e1184eea7addb8228ca73e450f01597 - key: customfield1 value: stringValue: customvalue1 @@ -4707,31 +8112,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: otel-collector + stringValue: pod-wo-index-w-ns-index - key: k8s.namespace.name value: - stringValue: default + stringValue: ns-w-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-587576d5842876t + stringValue: pod-wo-index-w-ns-index-7w4s8 - key: k8s.pod.uid value: - stringValue: 73bb8d19-b190-42e9-a77d-c84ef587bd02 + stringValue: 173f0280-eafc-4c11-b149-b30baa2e1958 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "40960" + - asInt: "34668544" attributes: - key: cluster_name 
value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 3c4c69e6395265782987df7e1a9ad2ce992b5fdce80124c6608594c54d069b4a + stringValue: 5b90948da2af5a7ec45193eaf7e8a763ac5724185a84c41182efc278ee245e1b - key: customfield1 value: stringValue: customvalue1 @@ -4749,31 +8154,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: local-path-provisioner + stringValue: kube-proxy - key: k8s.namespace.name value: - stringValue: local-path-storage + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: local-path-provisioner-6f8956fb48-4bmw5 + stringValue: kube-proxy-zh9sm - key: k8s.pod.uid value: - stringValue: 6099916d-c417-4145-b1e8-a68a083a4e36 + stringValue: e049ac1e-d79b-4446-a27a-33db00ab5daf - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "40960" + - asInt: "46919680" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 450386c43051a8d554ba7b41fb6fd35740fad2ee77f6a5be5f7d7b366934fa37 + stringValue: 6a5058048fcacfa65d6419d37f3e1377fd48c0eeb07afdf0f6eaefd8cdbf1d53 - key: customfield1 value: stringValue: customvalue1 @@ -4791,31 +8196,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: etcd + stringValue: nodejs-test - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: etcd-kind-control-plane + stringValue: nodejs-test-56b74df9ff-blxvb - key: k8s.pod.uid value: - stringValue: 8640ede3763d523342ced9ba7a5fd080 + stringValue: eade6268-3958-4711-8153-38c42d26d5e0 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "36864" + - asInt: "10686464" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 
47a8c0881856e8eebf6dead289f5605bf7e6e159131148313ff44b43f93792ab + stringValue: 6d3225876c70f364fb49189bd9c20dd170fbc49c621b786b2a2c338334c3dd1a - key: customfield1 value: stringValue: customvalue1 @@ -4833,31 +8238,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: cert-manager-controller + stringValue: pod-w-index-w-ns-exclude - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: ns-w-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-5c9d8879fd-zqcdv + stringValue: pod-w-exclude-wo-ns-exclude-q7cb2 - key: k8s.pod.uid value: - stringValue: dd68fc3a-2f3f-440d-a7e7-618aebbf2090 + stringValue: a5fddf35-a751-4dea-b45f-c7558fac023d - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "49152" + - asInt: "13357056" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 49285c01fa935e13e79252d57fb188b1c72b317328df453f855fd7307bf6166a + stringValue: 7f7bdf9904a22d51232483a680516023f4d4f53aa40535d3625e1786c0db784f - key: customfield1 value: stringValue: customvalue1 @@ -4875,31 +8280,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: coredns + stringValue: local-path-provisioner - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: local-path-storage - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-x8jcx + stringValue: local-path-provisioner-6f8956fb48-tjnnh - key: k8s.pod.uid value: - stringValue: 662cb1aa-7d77-4a28-813a-3abe808e9a04 + stringValue: 1a0fa2b5-48d1-4246-84f6-614a88fc0723 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "69632" + - asInt: "17485824" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 
4bd3dfa8576441902eb944836f6752aeb3d5fbd24b17e3984e9f88b205096130 + stringValue: 84b1b7d1397ace112e05e999658b9d64c2e1b70bfd4e01c2728fe8a1ffc8848f - key: customfield1 value: stringValue: customvalue1 @@ -4917,31 +8322,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-proxy + stringValue: cert-manager-webhook - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: cert-manager - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-proxy-zmds8 + stringValue: cert-manager-webhook-7f9f8648b9-ghhhm - key: k8s.pod.uid value: - stringValue: 3252bcd4-1aa0-4a8d-ad6e-fa3cee960076 + stringValue: 1ebf5a38-5152-430a-80fa-3854278e6a26 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "229376" + - asInt: "315674624" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 63c80e0ba2de4a1f83678a6dd53d96b507a07a8a18a45b9819e10498c9d851f0 + stringValue: 907b3374be621b2f943096103fa14b993759f1a8b889a792a4906fef2bc21d7d - key: customfield1 value: stringValue: customvalue1 @@ -4959,7 +8364,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: java-test + stringValue: otel-collector - key: k8s.namespace.name value: stringValue: default @@ -4968,22 +8373,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: java-test-566495f78-6nhnc + stringValue: sock-splunk-otel-collector-agent-lhkmv - key: k8s.pod.uid value: - stringValue: fbc84ba5-3b1f-4754-b3f3-76e86eace74e + stringValue: 4d535893-9ff1-41a0-95cd-704934c3e539 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "36864" + - asInt: "31850496" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 9eec542c85daa232c5488c4f587671a6cf01f52ccddf663abd74b8b2f158e962 + stringValue: 
94b6a464867b38cc97d28d9a6ff2101f6db08d9ab5b652fd95f5fb7b56719673 - key: customfield1 value: stringValue: customvalue1 @@ -5001,31 +8406,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-rbac-proxy + stringValue: cert-manager-controller - key: k8s.namespace.name value: - stringValue: default + stringValue: cert-manager - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-operator-6cf5597fb6-nkvgb + stringValue: cert-manager-67c98b89c8-dtt9m - key: k8s.pod.uid value: - stringValue: 24d849c7-da2c-4598-a307-9deba65ed5a8 + stringValue: 2a18cace-4258-47cc-8724-68105e50a6b0 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "36864" + - asInt: "17469440" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: cdf2eb0fde3a64bdcb49a31753d8030ee0d3c9d55e27099c582ed524e7fa58e3 + stringValue: 99c2ece6ee80be4814266e49ea7ddcc57b678344658f0f83780156b39d892999 - key: customfield1 value: stringValue: customvalue1 @@ -5043,31 +8448,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: cert-manager-webhook + stringValue: kindnet-cni - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-webhook-7bb7b75848-kbgq5 + stringValue: kindnet-v8s6x - key: k8s.pod.uid value: - stringValue: a21b8615-7e13-46d7-b4e8-8ae1e4a5c79f + stringValue: b2d71d05-0357-4dc5-9630-50a2fdf3b9c8 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "16384" + - asInt: "35889152" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: dd4fd2423e56794193a5ccb18b4fd39dbf27ed97eacb1f282e92d108bc0bb2e9 + stringValue: 
aa95d51f5ba3a56291b99a3c3f7d09cdcddaa40b64aeaf3ed078ef71c7192e55 - key: customfield1 value: stringValue: customvalue1 @@ -5085,7 +8490,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-scheduler + stringValue: coredns - key: k8s.namespace.name value: stringValue: kube-system @@ -5094,22 +8499,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-scheduler-kind-control-plane + stringValue: coredns-5dd5756b68-n2r77 - key: k8s.pod.uid value: - stringValue: 90194206b3ac749a8da43c61fd4761ca + stringValue: e9eab946-2f83-4950-9331-8d2140b9bb27 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "69632" + - asInt: "507699200" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: f42485d9874a9b0458270281ba2b5c76679bf622b30c92c54b696a5a81b5d7c0 + stringValue: ac22883ea76623227dff15a22321c7c5306180d143e321cda5b3400952d0e14b - key: customfield1 value: stringValue: customvalue1 @@ -5127,7 +8532,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kindnet-cni + stringValue: kube-apiserver - key: k8s.namespace.name value: stringValue: kube-system @@ -5136,25 +8541,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kindnet-wrk68 + stringValue: kube-apiserver-kind-control-plane - key: k8s.pod.uid value: - stringValue: e72734dd-e62d-4528-b718-d79e757c0900 + stringValue: 021ea1f4839617eef00a93c39d62616b - key: os.type value: stringValue: linux timeUnixNano: "1000000" - name: container.filesystem.usage - - gauge: - dataPoints: - - asInt: "409710592" + - asInt: "10690560" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0200fab216c7ccb291be3d1799f862bd8488cb786b4b1f50abc4e778dc130a3d + stringValue: c9af4eaede7b980b1d771af749f2370e6d3a88e89abd198b7d6142a8b41a27e0 - 
key: customfield1 value: stringValue: customvalue1 @@ -5172,31 +8574,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-apiserver + stringValue: pod-w-index-w-ns-index - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: ns-w-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-apiserver-kind-control-plane + stringValue: pod-w-index-w-ns-index-j4lrn - key: k8s.pod.uid value: - stringValue: 51acb728ea8dc3a01b43334942491036 + stringValue: 2c4ad2c5-a791-466c-ae50-f8163efc385c - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "208982016" + - asInt: "13656064" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 021327b7039db9dd913ba2beb4625bf3f2d522805decbeb30d3844953d689895 + stringValue: cb1aba654caa6d98519882865244158df6203f599bb35ed6e9e45e71430f7cea - key: customfield1 value: stringValue: customvalue1 @@ -5214,7 +8616,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: otel-collector + stringValue: kube-rbac-proxy - key: k8s.namespace.name value: stringValue: default @@ -5223,22 +8625,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-znqzs + stringValue: sock-operator-786c6c9777-2flf7 - key: k8s.pod.uid value: - stringValue: 3f2777cd-e7ce-485d-a92c-43c0a2aadee5 + stringValue: d4fc43e2-a114-492f-b477-e0936d63dec4 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "20529152" + - asInt: "145403904" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0a1903dcb4d0fdd4279b401318013e772a755df50a54714195cf5b81132ae688 + stringValue: d1cba7f3f652854d2dd9c41e7d0398fbc479188abcd88af5b9d9ca68327cf532 - key: customfield1 value: stringValue: customvalue1 @@ -5256,7 
+8658,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: manager + stringValue: otel-collector - key: k8s.namespace.name value: stringValue: default @@ -5265,22 +8667,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-operator-6cf5597fb6-nkvgb + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-69d74b99994lz9z - key: k8s.pod.uid value: - stringValue: 24d849c7-da2c-4598-a307-9deba65ed5a8 + stringValue: 24a76c39-d3a1-48dd-95cd-7f0196a84087 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "43945984" + - asInt: "167100416" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0b7de24593b9e9b9c4dcb6424f1e829e3232aac96921586aa5b4876e34fffead + stringValue: dc8158388f0fb8b20fdfa3938940ab3deabb37cb5236eff283068b7349e976ef - key: customfield1 value: stringValue: customvalue1 @@ -5298,31 +8700,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: nodejs-test + stringValue: etcd - key: k8s.namespace.name value: - stringValue: default + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: nodejs-test-d454cf769-n4whd + stringValue: etcd-kind-control-plane - key: k8s.pod.uid value: - stringValue: 1f680502-7878-451f-8880-2ca25419ce4d + stringValue: a652c9e3a037b7be2906ed0ac7fea632 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "25894912" + - asInt: "42700800" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0f663224843198b71d0acbd6738008173478ad97f79ed2411ba7a3d6014f69f4 + stringValue: df79bbf992582662811e9ddc802cc3fae756ac95e4a7cd4db76419e9e060dabc - key: customfield1 value: stringValue: customvalue1 @@ -5340,31 +8742,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name 
value: - stringValue: cert-manager-cainjector + stringValue: coredns - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-cainjector-6cc9b5f678-zm6qg + stringValue: coredns-5dd5756b68-5rzdc - key: k8s.pod.uid value: - stringValue: c17bb001-2b34-4173-993b-47cc1d285f3d + stringValue: 1e801067-cdf8-4d39-adb7-f04927fcbe4d - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "19079168" + - asInt: "145186816" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 2118c058c65a24bcf948d3b082c7d24a957ee636d8bfc9a31dec1c700912daf2 + stringValue: e6d4b6ded54e9e103f3642b9d4a22c8b244462a8dc125ad1df1161e04c6ddeb7 - key: customfield1 value: stringValue: customvalue1 @@ -5382,31 +8784,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: coredns + stringValue: dotnet-test - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-ttp4s + stringValue: dotnet-test-5479c475fc-6bbpg - key: k8s.pod.uid value: - stringValue: 8a841abc-5346-4344-81d4-8fc298823892 + stringValue: b0a76cb7-8faa-47b3-a0e1-5aeee5544e98 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "54239232" + - asInt: "33562624" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 2188e5dbd44970ada6bb9b4adc1190534cec2034ed4bbbefe39b998b762c8090 + stringValue: e8e78be19779f3697b37013f8e63b4268e5520ab87a1ff95f6d2232116e1ad4f - key: customfield1 value: stringValue: customvalue1 @@ -5424,7 +8826,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-controller-manager + stringValue: 
kube-scheduler - key: k8s.namespace.name value: stringValue: kube-system @@ -5433,22 +8835,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-controller-manager-kind-control-plane + stringValue: kube-scheduler-kind-control-plane - key: k8s.pod.uid value: - stringValue: f98b4cfedb968e3c798bae6ff1faa58d + stringValue: df1f09f3a1f003b8825519a3341f69fe - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "84807680" + - asInt: "10686464" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 23c653fde53fcb3b3dd7f586ed165f5621f786f1bd68f9ed18caaa132177a05e + stringValue: e8ead1ee3bb65cb19abe591a137c5124683eef66f2faa499c07893928c3360e6 - key: customfield1 value: stringValue: customvalue1 @@ -5466,31 +8868,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: otel-collector + stringValue: pod-w-index-w-ns-exclude - key: k8s.namespace.name value: - stringValue: default + stringValue: ns-w-exclude - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-587576d5842876t + stringValue: pod-w-index-w-ns-exclude-44q2b - key: k8s.pod.uid value: - stringValue: 73bb8d19-b190-42e9-a77d-c84ef587bd02 + stringValue: 7491b268-a198-4dbc-8ed3-3a27a279a732 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "9072640" + - asInt: "33460224" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 3c4c69e6395265782987df7e1a9ad2ce992b5fdce80124c6608594c54d069b4a + stringValue: ea927afa8bdefb1693209fbbf133edbba8648c99a9b7d6b5fe87a18b5993c910 - key: customfield1 value: stringValue: customvalue1 @@ -5508,31 +8910,36 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: local-path-provisioner + stringValue: 
cert-manager-cainjector - key: k8s.namespace.name value: - stringValue: local-path-storage + stringValue: cert-manager - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: local-path-provisioner-6f8956fb48-4bmw5 + stringValue: cert-manager-cainjector-5c5695d979-hz2x4 - key: k8s.pod.uid value: - stringValue: 6099916d-c417-4145-b1e8-a68a083a4e36 + stringValue: 4d0463e6-e229-4f66-94f8-07e553dbb1bb - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "63619072" + name: container.memory.usage + - name: container_cpu_utilization + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "16" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 450386c43051a8d554ba7b41fb6fd35740fad2ee77f6a5be5f7d7b366934fa37 + stringValue: 0fafe483dffee884af267b7155e8d8b3fc0c04d8df26cb1efa7cb108cd352bcf - key: customfield1 value: stringValue: customvalue1 @@ -5550,31 +8957,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: etcd + stringValue: manager - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: etcd-kind-control-plane + stringValue: sock-operator-786c6c9777-2flf7 - key: k8s.pod.uid value: - stringValue: 8640ede3763d523342ced9ba7a5fd080 + stringValue: d4fc43e2-a114-492f-b477-e0936d63dec4 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "27291648" + - asInt: "34" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 47a8c0881856e8eebf6dead289f5605bf7e6e159131148313ff44b43f93792ab + stringValue: 222c940f309d9bcd66429c5239a6e16654ee8fcf97d67cc888cacddddd099f55 - key: customfield1 value: stringValue: customvalue1 @@ -5592,31 +8999,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name 
value: - stringValue: cert-manager-controller + stringValue: targetallocator - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-5c9d8879fd-zqcdv + stringValue: sock-splunk-otel-collector-ta-7f6c9fdf4-kv6zv - key: k8s.pod.uid value: - stringValue: dd68fc3a-2f3f-440d-a7e7-618aebbf2090 + stringValue: 13d32d4e-5a6c-4f79-9f22-4a538e421a3c - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "17018880" + - asInt: "28" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 49285c01fa935e13e79252d57fb188b1c72b317328df453f855fd7307bf6166a + stringValue: 24b3f88060c9edadee5ff09089f3029a1a8b9f4d90db77b0298711bd01e90b72 - key: customfield1 value: stringValue: customvalue1 @@ -5634,31 +9041,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: coredns + stringValue: pod-w-index-wo-ns-index - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: ns-wo-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-x8jcx + stringValue: pod-w-index-wo-ns-index-c5xdz - key: k8s.pod.uid value: - stringValue: 662cb1aa-7d77-4a28-813a-3abe808e9a04 + stringValue: 7e87ed20-0fb1-47ad-850e-fd05ff32bf23 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "18698240" + - asInt: "30" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 4bd3dfa8576441902eb944836f6752aeb3d5fbd24b17e3984e9f88b205096130 + stringValue: 24befb850c6f1fb6b511631eec70e54a07a9b0f5ee40fb76f19da95f3b3a6c7f - key: customfield1 value: stringValue: customvalue1 @@ -5676,31 +9083,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-proxy + stringValue: 
pod-wo-index-wo-ns-index - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: ns-wo-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-proxy-zmds8 + stringValue: pod-wo-index-wo-ns-index-bp8hd - key: k8s.pod.uid value: - stringValue: 3252bcd4-1aa0-4a8d-ad6e-fa3cee960076 + stringValue: 2368457e-e367-44b8-90b0-78c751adb967 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "256647168" + - asInt: "32" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 63c80e0ba2de4a1f83678a6dd53d96b507a07a8a18a45b9819e10498c9d851f0 + stringValue: 33f86cf3de649408503e17237017b79e5cc4cd0b58ffb25cc924b4579963638c - key: customfield1 value: stringValue: customvalue1 @@ -5718,7 +9125,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: java-test + stringValue: prometheus-annotation-test - key: k8s.namespace.name value: stringValue: default @@ -5727,22 +9134,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: java-test-566495f78-6nhnc + stringValue: prometheus-annotation-test-cfc77c7b9-hlvbs - key: k8s.pod.uid value: - stringValue: fbc84ba5-3b1f-4754-b3f3-76e86eace74e + stringValue: 7c5e6a26-0c5b-4cc8-ade8-45efc11329d9 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "10100736" + - asInt: "1294" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 9eec542c85daa232c5488c4f587671a6cf01f52ccddf663abd74b8b2f158e962 + stringValue: 3f07c1373038d5f7474646dbb567c54ca699439c716bada8bfb0f3a35341890f - key: customfield1 value: stringValue: customvalue1 @@ -5760,7 +9167,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-rbac-proxy + stringValue: java-test - key: k8s.namespace.name value: stringValue: default @@ -5769,22 
+9176,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-operator-6cf5597fb6-nkvgb + stringValue: java-test-5c4d6479b8-sdpd7 - key: k8s.pod.uid value: - stringValue: 24d849c7-da2c-4598-a307-9deba65ed5a8 + stringValue: a45051ae-1c81-47c0-8de0-e71dfe319bb4 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "16199680" + - asInt: "48844" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: cdf2eb0fde3a64bdcb49a31753d8030ee0d3c9d55e27099c582ed524e7fa58e3 + stringValue: 46787827ec56c0df1950e55b771be6ebe4028d9d1c15ad752c346798491ca2a9 - key: customfield1 value: stringValue: customvalue1 @@ -5802,31 +9209,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: cert-manager-webhook + stringValue: kube-controller-manager - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-webhook-7bb7b75848-kbgq5 + stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: a21b8615-7e13-46d7-b4e8-8ae1e4a5c79f + stringValue: 78e1474ee2ca64de6cd3283ddf989107 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "21921792" + - asInt: "27" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: dd4fd2423e56794193a5ccb18b4fd39dbf27ed97eacb1f282e92d108bc0bb2e9 + stringValue: 4f8457613c97428a381ae6349ea7a9758e1184eea7addb8228ca73e450f01597 - key: customfield1 value: stringValue: customvalue1 @@ -5844,31 +9251,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-scheduler + stringValue: pod-wo-index-w-ns-index - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: ns-w-index - key: k8s.node.name value: 
stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-scheduler-kind-control-plane + stringValue: pod-wo-index-w-ns-index-7w4s8 - key: k8s.pod.uid value: - stringValue: 90194206b3ac749a8da43c61fd4761ca + stringValue: 173f0280-eafc-4c11-b149-b30baa2e1958 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "8232960" + - asInt: "3047" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: f42485d9874a9b0458270281ba2b5c76679bf622b30c92c54b696a5a81b5d7c0 + stringValue: 5b90948da2af5a7ec45193eaf7e8a763ac5724185a84c41182efc278ee245e1b - key: customfield1 value: stringValue: customvalue1 @@ -5886,7 +9293,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kindnet-cni + stringValue: kube-proxy - key: k8s.namespace.name value: stringValue: kube-system @@ -5895,27 +9302,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kindnet-wrk68 + stringValue: kube-proxy-zh9sm - key: k8s.pod.uid value: - stringValue: e72734dd-e62d-4528-b718-d79e757c0900 + stringValue: e049ac1e-d79b-4446-a27a-33db00ab5daf - key: os.type value: stringValue: linux timeUnixNano: "1000000" - name: container.memory.usage - - name: container_cpu_utilization - sum: - aggregationTemporality: 2 - dataPoints: - - asInt: "1690" + - asInt: "187" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0200fab216c7ccb291be3d1799f862bd8488cb786b4b1f50abc4e778dc130a3d + stringValue: 6a5058048fcacfa65d6419d37f3e1377fd48c0eeb07afdf0f6eaefd8cdbf1d53 - key: customfield1 value: stringValue: customvalue1 @@ -5933,31 +9335,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-apiserver + stringValue: nodejs-test - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: 
kind-control-plane - key: k8s.pod.name value: - stringValue: kube-apiserver-kind-control-plane + stringValue: nodejs-test-56b74df9ff-blxvb - key: k8s.pod.uid value: - stringValue: 51acb728ea8dc3a01b43334942491036 + stringValue: eade6268-3958-4711-8153-38c42d26d5e0 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "272" + - asInt: "25" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 021327b7039db9dd913ba2beb4625bf3f2d522805decbeb30d3844953d689895 + stringValue: 6d3225876c70f364fb49189bd9c20dd170fbc49c621b786b2a2c338334c3dd1a - key: customfield1 value: stringValue: customvalue1 @@ -5975,31 +9377,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: otel-collector + stringValue: pod-w-index-w-ns-exclude - key: k8s.namespace.name value: - stringValue: default + stringValue: ns-w-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-agent-znqzs + stringValue: pod-w-exclude-wo-ns-exclude-q7cb2 - key: k8s.pod.uid value: - stringValue: 3f2777cd-e7ce-485d-a92c-43c0a2aadee5 + stringValue: a5fddf35-a751-4dea-b45f-c7558fac023d - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "38" + - asInt: "1789" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0a1903dcb4d0fdd4279b401318013e772a755df50a54714195cf5b81132ae688 + stringValue: 7f7bdf9904a22d51232483a680516023f4d4f53aa40535d3625e1786c0db784f - key: customfield1 value: stringValue: customvalue1 @@ -6017,31 +9419,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: manager + stringValue: local-path-provisioner - key: k8s.namespace.name value: - stringValue: default + stringValue: local-path-storage - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - 
stringValue: sock-operator-6cf5597fb6-nkvgb + stringValue: local-path-provisioner-6f8956fb48-tjnnh - key: k8s.pod.uid value: - stringValue: 24d849c7-da2c-4598-a307-9deba65ed5a8 + stringValue: 1a0fa2b5-48d1-4246-84f6-614a88fc0723 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "106" + - asInt: "1653" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0b7de24593b9e9b9c4dcb6424f1e829e3232aac96921586aa5b4876e34fffead + stringValue: 84b1b7d1397ace112e05e999658b9d64c2e1b70bfd4e01c2728fe8a1ffc8848f - key: customfield1 value: stringValue: customvalue1 @@ -6059,31 +9461,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: nodejs-test + stringValue: cert-manager-webhook - key: k8s.namespace.name value: - stringValue: default + stringValue: cert-manager - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: nodejs-test-d454cf769-n4whd + stringValue: cert-manager-webhook-7f9f8648b9-ghhhm - key: k8s.pod.uid value: - stringValue: 1f680502-7878-451f-8880-2ca25419ce4d + stringValue: 1ebf5a38-5152-430a-80fa-3854278e6a26 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "35" + - asInt: "539" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 0f663224843198b71d0acbd6738008173478ad97f79ed2411ba7a3d6014f69f4 + stringValue: 907b3374be621b2f943096103fa14b993759f1a8b889a792a4906fef2bc21d7d - key: customfield1 value: stringValue: customvalue1 @@ -6101,31 +9503,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: cert-manager-cainjector + stringValue: otel-collector - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-cainjector-6cc9b5f678-zm6qg + 
stringValue: sock-splunk-otel-collector-agent-lhkmv - key: k8s.pod.uid value: - stringValue: c17bb001-2b34-4173-993b-47cc1d285f3d + stringValue: 4d535893-9ff1-41a0-95cd-704934c3e539 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "44" + - asInt: "1159" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 2118c058c65a24bcf948d3b082c7d24a957ee636d8bfc9a31dec1c700912daf2 + stringValue: 94b6a464867b38cc97d28d9a6ff2101f6db08d9ab5b652fd95f5fb7b56719673 - key: customfield1 value: stringValue: customvalue1 @@ -6143,31 +9545,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: coredns + stringValue: cert-manager-controller - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: cert-manager - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-ttp4s + stringValue: cert-manager-67c98b89c8-dtt9m - key: k8s.pod.uid value: - stringValue: 8a841abc-5346-4344-81d4-8fc298823892 + stringValue: 2a18cace-4258-47cc-8724-68105e50a6b0 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "410" + - asInt: "1395" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 2188e5dbd44970ada6bb9b4adc1190534cec2034ed4bbbefe39b998b762c8090 + stringValue: 99c2ece6ee80be4814266e49ea7ddcc57b678344658f0f83780156b39d892999 - key: customfield1 value: stringValue: customvalue1 @@ -6185,7 +9587,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-controller-manager + stringValue: kindnet-cni - key: k8s.namespace.name value: stringValue: kube-system @@ -6194,22 +9596,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-controller-manager-kind-control-plane + stringValue: kindnet-v8s6x - key: k8s.pod.uid value: - stringValue: 
f98b4cfedb968e3c798bae6ff1faa58d + stringValue: b2d71d05-0357-4dc5-9630-50a2fdf3b9c8 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "87" + - asInt: "12054" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 23c653fde53fcb3b3dd7f586ed165f5621f786f1bd68f9ed18caaa132177a05e + stringValue: aa95d51f5ba3a56291b99a3c3f7d09cdcddaa40b64aeaf3ed078ef71c7192e55 - key: customfield1 value: stringValue: customvalue1 @@ -6227,31 +9629,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: otel-collector + stringValue: coredns - key: k8s.namespace.name value: - stringValue: default + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-587576d5842876t + stringValue: coredns-5dd5756b68-n2r77 - key: k8s.pod.uid value: - stringValue: 73bb8d19-b190-42e9-a77d-c84ef587bd02 + stringValue: e9eab946-2f83-4950-9331-8d2140b9bb27 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "17" + - asInt: "167948" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 3c4c69e6395265782987df7e1a9ad2ce992b5fdce80124c6608594c54d069b4a + stringValue: ac22883ea76623227dff15a22321c7c5306180d143e321cda5b3400952d0e14b - key: customfield1 value: stringValue: customvalue1 @@ -6269,31 +9671,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: local-path-provisioner + stringValue: kube-apiserver - key: k8s.namespace.name value: - stringValue: local-path-storage + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: local-path-provisioner-6f8956fb48-4bmw5 + stringValue: kube-apiserver-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
6099916d-c417-4145-b1e8-a68a083a4e36 + stringValue: 021ea1f4839617eef00a93c39d62616b - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "592" + - asInt: "28" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 450386c43051a8d554ba7b41fb6fd35740fad2ee77f6a5be5f7d7b366934fa37 + stringValue: c9af4eaede7b980b1d771af749f2370e6d3a88e89abd198b7d6142a8b41a27e0 - key: customfield1 value: stringValue: customvalue1 @@ -6311,31 +9713,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: etcd + stringValue: pod-w-index-w-ns-index - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: ns-w-index - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: etcd-kind-control-plane + stringValue: pod-w-index-w-ns-index-j4lrn - key: k8s.pod.uid value: - stringValue: 8640ede3763d523342ced9ba7a5fd080 + stringValue: 2c4ad2c5-a791-466c-ae50-f8163efc385c - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "59" + - asInt: "30" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 47a8c0881856e8eebf6dead289f5605bf7e6e159131148313ff44b43f93792ab + stringValue: cb1aba654caa6d98519882865244158df6203f599bb35ed6e9e45e71430f7cea - key: customfield1 value: stringValue: customvalue1 @@ -6353,31 +9755,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: cert-manager-controller + stringValue: kube-rbac-proxy - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-5c9d8879fd-zqcdv + stringValue: sock-operator-786c6c9777-2flf7 - key: k8s.pod.uid value: - stringValue: dd68fc3a-2f3f-440d-a7e7-618aebbf2090 + stringValue: d4fc43e2-a114-492f-b477-e0936d63dec4 - 
key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "50" + - asInt: "126" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 49285c01fa935e13e79252d57fb188b1c72b317328df453f855fd7307bf6166a + stringValue: d1cba7f3f652854d2dd9c41e7d0398fbc479188abcd88af5b9d9ca68327cf532 - key: customfield1 value: stringValue: customvalue1 @@ -6395,31 +9797,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: coredns + stringValue: otel-collector - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-x8jcx + stringValue: sock-splunk-otel-collector-k8s-cluster-receiver-69d74b99994lz9z - key: k8s.pod.uid value: - stringValue: 662cb1aa-7d77-4a28-813a-3abe808e9a04 + stringValue: 24a76c39-d3a1-48dd-95cd-7f0196a84087 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "40" + - asInt: "60300" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 4bd3dfa8576441902eb944836f6752aeb3d5fbd24b17e3984e9f88b205096130 + stringValue: dc8158388f0fb8b20fdfa3938940ab3deabb37cb5236eff283068b7349e976ef - key: customfield1 value: stringValue: customvalue1 @@ -6437,7 +9839,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-proxy + stringValue: etcd - key: k8s.namespace.name value: stringValue: kube-system @@ -6446,22 +9848,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-proxy-zmds8 + stringValue: etcd-kind-control-plane - key: k8s.pod.uid value: - stringValue: 3252bcd4-1aa0-4a8d-ad6e-fa3cee960076 + stringValue: a652c9e3a037b7be2906ed0ac7fea632 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "1142" + - asInt: "13279" attributes: - 
key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 63c80e0ba2de4a1f83678a6dd53d96b507a07a8a18a45b9819e10498c9d851f0 + stringValue: df79bbf992582662811e9ddc802cc3fae756ac95e4a7cd4db76419e9e060dabc - key: customfield1 value: stringValue: customvalue1 @@ -6479,31 +9881,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: java-test + stringValue: coredns - key: k8s.namespace.name value: - stringValue: default + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: java-test-566495f78-6nhnc + stringValue: coredns-5dd5756b68-5rzdc - key: k8s.pod.uid value: - stringValue: fbc84ba5-3b1f-4754-b3f3-76e86eace74e + stringValue: 1e801067-cdf8-4d39-adb7-f04927fcbe4d - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "23" + - asInt: "495" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 9eec542c85daa232c5488c4f587671a6cf01f52ccddf663abd74b8b2f158e962 + stringValue: e6d4b6ded54e9e103f3642b9d4a22c8b244462a8dc125ad1df1161e04c6ddeb7 - key: customfield1 value: stringValue: customvalue1 @@ -6521,7 +9923,7 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-rbac-proxy + stringValue: dotnet-test - key: k8s.namespace.name value: stringValue: default @@ -6530,22 +9932,22 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: sock-operator-6cf5597fb6-nkvgb + stringValue: dotnet-test-5479c475fc-6bbpg - key: k8s.pod.uid value: - stringValue: 24d849c7-da2c-4598-a307-9deba65ed5a8 + stringValue: b0a76cb7-8faa-47b3-a0e1-5aeee5544e98 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "22" + - asInt: "10951" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 
cdf2eb0fde3a64bdcb49a31753d8030ee0d3c9d55e27099c582ed524e7fa58e3 + stringValue: e8e78be19779f3697b37013f8e63b4268e5520ab87a1ff95f6d2232116e1ad4f - key: customfield1 value: stringValue: customvalue1 @@ -6563,31 +9965,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: cert-manager-webhook + stringValue: kube-scheduler - key: k8s.namespace.name value: - stringValue: cert-manager + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: cert-manager-webhook-7bb7b75848-kbgq5 + stringValue: kube-scheduler-kind-control-plane - key: k8s.pod.uid value: - stringValue: a21b8615-7e13-46d7-b4e8-8ae1e4a5c79f + stringValue: df1f09f3a1f003b8825519a3341f69fe - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "168" + - asInt: "25" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: dd4fd2423e56794193a5ccb18b4fd39dbf27ed97eacb1f282e92d108bc0bb2e9 + stringValue: e8ead1ee3bb65cb19abe591a137c5124683eef66f2faa499c07893928c3360e6 - key: customfield1 value: stringValue: customvalue1 @@ -6605,31 +10007,31 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kube-scheduler + stringValue: pod-w-index-w-ns-exclude - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: ns-w-exclude - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-scheduler-kind-control-plane + stringValue: pod-w-index-w-ns-exclude-44q2b - key: k8s.pod.uid value: - stringValue: 90194206b3ac749a8da43c61fd4761ca + stringValue: 7491b268-a198-4dbc-8ed3-3a27a279a732 - key: os.type value: stringValue: linux timeUnixNano: "1000000" - - asInt: "20" + - asInt: "3153" attributes: - key: cluster_name value: stringValue: ci-k8s-cluster - key: container.id value: - stringValue: 
f42485d9874a9b0458270281ba2b5c76679bf622b30c92c54b696a5a81b5d7c0 + stringValue: ea927afa8bdefb1693209fbbf133edbba8648c99a9b7d6b5fe87a18b5993c910 - key: customfield1 value: stringValue: customvalue1 @@ -6647,19 +10049,19 @@ resourceMetrics: stringValue: dev-operator - key: k8s.container.name value: - stringValue: kindnet-cni + stringValue: cert-manager-cainjector - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: cert-manager - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kindnet-wrk68 + stringValue: cert-manager-cainjector-5c5695d979-hz2x4 - key: k8s.pod.uid value: - stringValue: e72734dd-e62d-4528-b718-d79e757c0900 + stringValue: 4d0463e6-e229-4f66-94f8-07e553dbb1bb - key: os.type value: stringValue: linux diff --git a/functional_tests/testdata/expected_kind_values/expected_nodejs_traces.yaml b/functional_tests/testdata/expected_kind_values/expected_nodejs_traces.yaml index 2c9459957..ac4d895cc 100644 --- a/functional_tests/testdata/expected_kind_values/expected_nodejs_traces.yaml +++ b/functional_tests/testdata/expected_kind_values/expected_nodejs_traces.yaml @@ -12,13 +12,13 @@ resourceSpans: stringValue: opentelemetry - key: telemetry.sdk.version value: - stringValue: 1.18.1 + stringValue: 1.26.0 - key: splunk.distro.version value: - stringValue: latest + stringValue: 2.12.0 - key: splunk.zc.method value: - stringValue: splunk-otel-js:latest + stringValue: splunk-otel-js:v2.12.0 - key: k8s.container.name value: stringValue: nodejs-test @@ -33,10 +33,10 @@ resourceSpans: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: nodejs-test-d454cf769-rq7fs + stringValue: nodejs-test-56b74df9ff-dvls2 - key: k8s.replicaset.name value: - stringValue: nodejs-test-d454cf769 + stringValue: nodejs-test-56b74df9ff - key: service.version value: stringValue: latest @@ -51,28 +51,46 @@ resourceSpans: stringValue: linux - key: os.version value: - stringValue: 6.5.11-linuxkit + 
stringValue: 6.10.0-linuxkit - key: process.pid value: intValue: "10" - key: process.executable.name value: stringValue: node + - key: process.executable.path + value: + stringValue: /usr/local/bin/node + - key: process.command_args + value: + arrayValue: + values: + - stringValue: /usr/local/bin/node + - stringValue: /index.js - key: process.runtime.version value: stringValue: 16.20.2 - key: process.runtime.name value: stringValue: nodejs - - key: k8s.pod.ip + - key: process.runtime.description value: - stringValue: 10.244.0.21 - - key: k8s.pod.uid + stringValue: Node.js + - key: process.command + value: + stringValue: /index.js + - key: process.owner value: - stringValue: 37458d11-a530-4752-9d26-db60433b2744 + stringValue: root + - key: k8s.pod.ip + value: + stringValue: 10.244.0.118 - key: k8s.pod.labels.app value: stringValue: nodejs-test + - key: k8s.pod.uid + value: + stringValue: 5e30d862-b449-41ad-963d-a10b6414336d - key: container.image.name value: stringValue: quay.io/splunko11ytest/nodejs_test @@ -81,7 +99,7 @@ resourceSpans: stringValue: latest - key: container.id value: - stringValue: dc62b8db1df2de43524c114c6b7e1f091b01e4b5079e964772bba0d5be37e415 + stringValue: 35dc5a29a73d04f4231e9f5768e61478fdb9dfd0b6f6f8876ce3beaf06bf1e8d - key: k8s.cluster.name value: stringValue: dev-operator @@ -101,7 +119,6 @@ resourceSpans: scopeSpans: - scope: name: '@opentelemetry/instrumentation-http' - version: 0.48.0 spans: - attributes: - key: http.url @@ -124,7 +141,223 @@ resourceSpans: stringValue: / - key: http.user_agent value: - stringValue: '*' + stringValue: curl/8.4.0 + - key: http.flavor + value: + stringValue: "1.1" + - key: net.transport + value: + stringValue: ip_tcp + - key: net.host.ip + value: + stringValue: 127.0.0.1 + - key: net.host.port + value: + intValue: "3000" + - key: net.peer.ip + value: + stringValue: 127.0.0.1 + - key: net.peer.port + value: + intValue: "43006" + - key: http.status_code + value: + intValue: "200" + - key: http.status_text + 
value: + stringValue: OK + endTimeUnixNano: "1726502403845388875" + kind: 2 + name: GET + parentSpanId: "" + spanId: 52062154b9ec5290 + startTimeUnixNano: "1726502403845000000" + status: {} + traceId: 7e5bef2baa2346db56e6a30517c71dd6 + - attributes: + - key: http.url + value: + stringValue: http://localhost:3000/ + - key: http.host + value: + stringValue: localhost:3000 + - key: net.host.name + value: + stringValue: localhost + - key: http.method + value: + stringValue: GET + - key: http.scheme + value: + stringValue: http + - key: http.target + value: + stringValue: / + - key: http.user_agent + value: + stringValue: curl/8.4.0 + - key: http.flavor + value: + stringValue: "1.1" + - key: net.transport + value: + stringValue: ip_tcp + - key: net.host.ip + value: + stringValue: 127.0.0.1 + - key: net.host.port + value: + intValue: "3000" + - key: net.peer.ip + value: + stringValue: 127.0.0.1 + - key: net.peer.port + value: + intValue: "43014" + - key: http.status_code + value: + intValue: "200" + - key: http.status_text + value: + stringValue: OK + endTimeUnixNano: "1726502404852339167" + kind: 2 + name: GET + parentSpanId: "" + spanId: 31da8ae156184ae0 + startTimeUnixNano: "1726502404852000000" + status: {} + traceId: aa565d9b13731fde2e28512e6623b6b6 + - attributes: + - key: http.url + value: + stringValue: http://localhost:3000/ + - key: http.host + value: + stringValue: localhost:3000 + - key: net.host.name + value: + stringValue: localhost + - key: http.method + value: + stringValue: GET + - key: http.scheme + value: + stringValue: http + - key: http.target + value: + stringValue: / + - key: http.user_agent + value: + stringValue: curl/8.4.0 + - key: http.flavor + value: + stringValue: "1.1" + - key: net.transport + value: + stringValue: ip_tcp + - key: net.host.ip + value: + stringValue: 127.0.0.1 + - key: net.host.port + value: + intValue: "3000" + - key: net.peer.ip + value: + stringValue: 127.0.0.1 + - key: net.peer.port + value: + intValue: "43018" + - key: 
http.status_code + value: + intValue: "200" + - key: http.status_text + value: + stringValue: OK + endTimeUnixNano: "1726502405859321125" + kind: 2 + name: GET + parentSpanId: "" + spanId: ffbefd6fc04ecab3 + startTimeUnixNano: "1726502405859000000" + status: {} + traceId: bfb8870087b0d9f03659681cbc30801b + - attributes: + - key: http.url + value: + stringValue: http://localhost:3000/ + - key: http.host + value: + stringValue: localhost:3000 + - key: net.host.name + value: + stringValue: localhost + - key: http.method + value: + stringValue: GET + - key: http.scheme + value: + stringValue: http + - key: http.target + value: + stringValue: / + - key: http.user_agent + value: + stringValue: curl/8.4.0 + - key: http.flavor + value: + stringValue: "1.1" + - key: net.transport + value: + stringValue: ip_tcp + - key: net.host.ip + value: + stringValue: 127.0.0.1 + - key: net.host.port + value: + intValue: "3000" + - key: net.peer.ip + value: + stringValue: 127.0.0.1 + - key: net.peer.port + value: + intValue: "43024" + - key: http.status_code + value: + intValue: "200" + - key: http.status_text + value: + stringValue: OK + endTimeUnixNano: "1726502406863394916" + kind: 2 + name: GET + parentSpanId: "" + spanId: 7f34b9ea0aa0e993 + startTimeUnixNano: "1726502406863000000" + status: {} + traceId: 1d3abed17ff80cd0e8ca9914dd545e94 + - attributes: + - key: http.url + value: + stringValue: http://localhost:3000/ + - key: http.host + value: + stringValue: localhost:3000 + - key: net.host.name + value: + stringValue: localhost + - key: http.method + value: + stringValue: GET + - key: http.scheme + value: + stringValue: http + - key: http.target + value: + stringValue: / + - key: http.user_agent + value: + stringValue: curl/8.4.0 - key: http.flavor value: stringValue: "1.1" @@ -142,16 +375,18 @@ resourceSpans: stringValue: 127.0.0.1 - key: net.peer.port value: - stringValue: '*' + intValue: "43036" - key: http.status_code value: intValue: "200" - key: http.status_text value: 
stringValue: OK + endTimeUnixNano: "1726502407872361666" kind: 2 name: GET parentSpanId: "" - spanId: "" + spanId: 304bd20b92f9c8ff + startTimeUnixNano: "1726502407872000000" status: {} - traceId: "" + traceId: 1e0ded79cf35b487d2f02207cfe58da4 diff --git a/functional_tests/testdata_configuration_switching/values/values_cluster_receiver_switching.yaml.tmpl b/functional_tests/testdata_configuration_switching/values/values_cluster_receiver_switching.yaml.tmpl new file mode 100644 index 000000000..a3fe9722b --- /dev/null +++ b/functional_tests/testdata_configuration_switching/values/values_cluster_receiver_switching.yaml.tmpl @@ -0,0 +1,32 @@ +--- +splunkPlatform: + token: foobar + endpoint: {{ .LogHecEndpoint }} + metricsEnabled: true + metricsIndex: myMetricsIndex + +logsCollection: + journald: + enabled: true + directory: /run/log/journal + +agent: + config: + exporters: + splunk_hec/platform_metrics: + endpoint: {{ .MetricHecEndpoint }} + +clusterReceiver: + enabled: {{ .ClusterReceiverEnabled }} + config: + exporters: + splunk_hec/platform_logs: + endpoint: {{ .LogObjectsHecEndpoint }} + k8sObjects: + - name: pods + mode: pull + interval: 5s + - name: events + mode: watch + +clusterName: dev-operator diff --git a/functional_tests/testdata_configuration_switching/values/values_indexes_switching.yaml.tmpl b/functional_tests/testdata_configuration_switching/values/values_indexes_switching.yaml.tmpl new file mode 100644 index 000000000..de8ce1bb5 --- /dev/null +++ b/functional_tests/testdata_configuration_switching/values/values_indexes_switching.yaml.tmpl @@ -0,0 +1,19 @@ +--- +splunkPlatform: + token: foobar + endpoint: {{ .LogHecEndpoint }} + metricsEnabled: true + logsEnabled: true + metricsIndex: {{ .MetricsIndex }} + index: {{ .LogsIndex }} + {{ if .NonDefaultSourcetype }} + sourcetype: {{ .Sourcetype }} + {{ end }} + +agent: + config: + exporters: + splunk_hec/platform_metrics: + endpoint: {{ .MetricHecEndpoint }} + +clusterName: dev-operator diff --git 
a/functional_tests/testdata_configuration_switching/values/values_logs_and_metrics_switching.yaml.tmpl b/functional_tests/testdata_configuration_switching/values/values_logs_and_metrics_switching.yaml.tmpl new file mode 100644 index 000000000..a9aba362f --- /dev/null +++ b/functional_tests/testdata_configuration_switching/values/values_logs_and_metrics_switching.yaml.tmpl @@ -0,0 +1,16 @@ +--- +splunkPlatform: + token: foobar + endpoint: {{ .LogHecEndpoint }} + metricsEnabled: {{ .MetricsEnabled }} + logsEnabled: {{ .LogsEnabled }} + metricsIndex: myMetricsIndex + +{{ if .MetricsEnabled }} +agent: + config: + exporters: + splunk_hec/platform_metrics: + endpoint: {{ .MetricHecEndpoint }} +{{ end }} +clusterName: dev-operator diff --git a/functional_tests/testdata_histogram/expected/v1.25/controller_manager_metrics.yaml b/functional_tests/testdata_histogram/expected/v1.25/controller_manager_metrics.yaml index 9e7d6c5b8..83643c700 100644 --- a/functional_tests/testdata_histogram/expected/v1.25/controller_manager_metrics.yaml +++ b/functional_tests/testdata_histogram/expected/v1.25/controller_manager_metrics.yaml @@ -21,7 +21,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 16076d7b-14ce-4010-82f3-a71bd8f4cf08 + stringValue: d6ccc2e8-b1ff-4544-84f5-8747faf8baba - key: net.host.name value: stringValue: 172.18.0.2 @@ -49,98 +49,224 @@ resourceMetrics: schemaUrl: https://opentelemetry.io/schemas/1.6.1 scopeMetrics: - metrics: - - description: Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. + - description: Distribution of freed heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "7612" + - "238609" + - "125185" + - "91120" + - "46694" + - "13438" + - "5225" + - "1637" + - "1640" + - "274" + - "56" + - "84" + count: "531574" + explicitBounds: + - 8.999999999999998 + - 24.999999999999996 + - 64.99999999999999 + - 144.99999999999997 + - 320.99999999999994 + - 704.9999999999999 + - 1536.9999999999998 + - 3200.9999999999995 + - 6528.999999999999 + - 13568.999999999998 + - 27264.999999999996 + startTimeUnixNano: "1000000" + sum: 6.4207936e+07 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: go_gc_heap_frees_by_size_bytes_total + - description: '[ALPHA] Distribution of the remaining lifetime on the certificate used to authenticate a request.' histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - "0" - - "1040" - "0" - - "389" - - "1979" - - "2873" - - "782" - - "259" - - "16" - "0" - "0" - count: "7338" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" explicitBounds: - - -5e-324 - - 9.999999999999999e-10 - - 9.999999999999999e-09 - - 9.999999999999998e-08 - - 1.0239999999999999e-06 - - 1.0239999999999999e-05 - - 0.00010239999999999998 - - 0.0010485759999999998 - - 0.010485759999999998 - - 0.10485759999999998 + - 0 + - 1800 + - 3600 + - 7200 + - 21600 + - 43200 + - 86400 + - 172800 + - 345600 + - 604800 + - 2.592e+06 + - 7.776e+06 + - 1.5552e+07 + - 3.1104e+07 startTimeUnixNano: "1000000" - sum: NaN + sum: 0 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: go_sched_latencies_seconds - - description: '[ALPHA] Request latency in seconds. Broken down by verb, and host.' + name: apiserver_client_certificate_expiration_seconds + - description: '[ALPHA] Request latency in seconds. Broken down by status code.' 
histogram: aggregationTemporality: 2 dataPoints: - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: code value: - stringValue: GET + stringValue: "201" bucketCounts: - - "321" - - "186" - - "39" - - "11" - - "9" - - "2" - - "0" - "1" - "0" - "0" - "0" - "0" - "0" - count: "569" + - "0" + - "0" + - "0" + count: "1" explicitBounds: - - 0.005 - - 0.025 - - 0.1 - 0.25 - 0.5 + - 0.7 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 1.5 + - 3 + - 5 + - 10 + startTimeUnixNano: "1000000" + sum: 0.001115911 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: apiserver_delegated_authz_request_duration_seconds + - description: '[ALPHA] Histogram of the number of seconds the last auth exec plugin client certificate lived before being rotated. If auth exec plugin client certificates are unused, histogram will contain no data.' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: + - 600 + - 1800 + - 3600 + - 14400 + - 86400 + - 604800 + - 2.592e+06 + - 7.776e+06 + - 1.5552e+07 + - 3.1104e+07 + - 1.24416e+08 startTimeUnixNano: "1000000" - sum: 14.302989660999994 + sum: 0 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: rest_client_exec_plugin_certificate_rotation_age + - description: '[ALPHA] Number of namespace syncs happened in root ca cert publisher.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: code value: - stringValue: PATCH + stringValue: "200" bucketCounts: - - "9" - - "5" - "0" - "0" + - "4" - "1" - - "3" + - "0" + - "0" + - "1" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "6" + explicitBounds: + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + - 16.384 + startTimeUnixNano: "1000000" + sum: 0.05278572 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: root_ca_cert_publisher_sync_duration_seconds + - description: '[ALPHA] A metric measuring the latency for updating each load balancer hosts.' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" - "0" - "0" - "0" @@ -148,37 +274,48 @@ resourceMetrics: - "0" - "0" - "0" - count: "18" explicitBounds: - - 0.005 - - 0.025 - - 0.1 - - 0.25 - - 0.5 - 1 - 2 - 4 - 8 - - 15 - - 30 - - 60 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 startTimeUnixNano: "1000000" - sum: 2.578822609999999 + sum: 0 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: service_controller_update_loadbalancer_host_latency_seconds + - description: '[ALPHA] Client side rate limiter latency in seconds. Broken down by verb, and host.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - key: host value: stringValue: 172.18.0.2:6443 - key: verb value: - stringValue: POST + stringValue: GET bucketCounts: - - "55" - - "13" - - "15" + - "581" + - "0" + - "48" + - "3" - "5" - - "10" - - "6" + - "2" - "0" - "0" - "0" @@ -186,7 +323,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "104" + count: "639" explicitBounds: - 0.005 - 0.025 @@ -201,7 +338,7 @@ resourceMetrics: - 30 - 60 startTimeUnixNano: "1000000" - sum: 9.390939016999999 + sum: 6.003480313 timeUnixNano: "1000000" - attributes: - key: host @@ -209,14 +346,11 @@ resourceMetrics: stringValue: 172.18.0.2:6443 - key: verb value: - stringValue: PUT + stringValue: PATCH bucketCounts: - - "91" - - "39" - - "3" + - "16" - "0" - "0" - - "1" - "0" - "0" - "0" @@ -224,7 +358,10 @@ resourceMetrics: - "0" - "0" - "0" - count: "134" + - "0" + - "0" + - "0" + count: "16" explicitBounds: - 0.005 - 0.025 @@ -239,23 +376,19 @@ resourceMetrics: - 30 - 60 startTimeUnixNano: "1000000" - sum: 1.3560852340000007 + sum: 2.1541e-05 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: rest_client_request_duration_seconds - - description: '[ALPHA] The time it took to delete the job since it became eligible for deletion' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" + - attributes: + - key: host + value: + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: POST + bucketCounts: + - "80" + - "2" + - "19" - "0" - "0" - "0" @@ -266,37 +399,32 @@ resourceMetrics: - "0" - "0" - "0" + count: "101" explicitBounds: + - 0.005 + - 0.025 - 0.1 - - 0.2 - - 0.4 - - 0.8 - - 1.6 - - 3.2 - - 6.4 - - 12.8 - - 25.6 - - 51.2 - - 102.4 - - 204.8 - - 409.6 - - 819.2 + - 0.25 + - 0.5 + - 1 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 1.0515303650000007 timeUnixNano: "1000000" - metadata: - - key: 
prometheus.type - value: - stringValue: histogram - name: ttl_after_finished_controller_job_deletion_duration_seconds - - description: '[ALPHA] Latencies in seconds of data encryption key(DEK) generation operations.' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" + - attributes: + - key: host + value: + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PUT + bucketCounts: + - "149" - "0" - "0" - "0" @@ -309,29 +437,28 @@ resourceMetrics: - "0" - "0" - "0" + count: "149" explicitBounds: - - 5e-06 - - 1e-05 - - 2e-05 - - 4e-05 - - 8e-05 - - 0.00016 - - 0.00032 - - 0.00064 - - 0.00128 - - 0.00256 - - 0.00512 - - 0.01024 - - 0.02048 - - 0.04096 + - 0.005 + - 0.025 + - 0.1 + - 0.25 + - 0.5 + - 1 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.00021824399999999993 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: apiserver_storage_data_key_generation_duration_seconds + name: rest_client_rate_limiter_duration_seconds - description: '[ALPHA] Authentication duration in seconds broken out by result.' histogram: aggregationTemporality: 2 @@ -341,7 +468,7 @@ resourceMetrics: value: stringValue: success bucketCounts: - - "13" + - "16" - "0" - "0" - "0" @@ -357,7 +484,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "13" + count: "16" explicitBounds: - 0.001 - 0.002 @@ -375,14 +502,14 @@ resourceMetrics: - 8.192 - 16.384 startTimeUnixNano: "1000000" - sum: 0.000339893 + sum: 0.00042820699999999995 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram name: authentication_duration_seconds - - description: '[ALPHA] Distribution of the remaining lifetime on the certificate used to authenticate a request.' 
+ - description: '[ALPHA] Time between when a cronjob is scheduled to be run, and when the corresponding job is created' histogram: aggregationTemporality: 2 dataPoints: @@ -398,25 +525,17 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "0" - - "0" - - "0" explicitBounds: - - 0 - - 1800 - - 3600 - - 7200 - - 21600 - - 43200 - - 86400 - - 172800 - - 345600 - - 604800 - - 2.592e+06 - - 7.776e+06 - - 1.5552e+07 - - 3.1104e+07 + - 1 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -424,103 +543,276 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: apiserver_client_certificate_expiration_seconds - - description: '[ALPHA] How long in seconds processing an item from workqueue takes.' + name: cronjob_controller_cronjob_job_creation_skew_duration_seconds + - description: '[ALPHA] Latencies in seconds of data encryption key(DEK) generation operations.' histogram: aggregationTemporality: 2 dataPoints: - - attributes: - - key: name - value: - stringValue: ClusterRoleAggregator - bucketCounts: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" - "0" - "0" - "0" - "0" - "0" - - "47" - - "12" - "0" - - "3" - "0" - "0" - count: "62" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 5e-06 + - 1e-05 + - 2e-05 + - 4e-05 + - 8e-05 + - 0.00016 + - 0.00032 + - 0.00064 + - 0.00128 + - 0.00256 + - 0.00512 + - 0.01024 + - 0.02048 + - 0.04096 startTimeUnixNano: "1000000" - sum: 2.1239064930000002 + sum: 0 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: DynamicCABundle-client-ca-bundle - bucketCounts: + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: apiserver_storage_data_key_generation_duration_seconds + - description: '[ALPHA] Number of endpoints removed on each Service sync' + histogram: + 
aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "18" - "0" - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - "0" - "0" - "0" - count: "1" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "18" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 + - 32768 startTimeUnixNano: "1000000" - sum: 2.8783e-05 + sum: 0 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: DynamicCABundle-csr-controller - bucketCounts: + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: endpoint_slice_controller_endpoints_removed_per_sync + - description: '[ALPHA] Duration of syncEndpoints() in seconds' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "11" + - "0" + - "0" + - "0" - "0" - "0" - "0" - "0" - "0" - - "5" - "0" - "0" - "0" - "0" - "0" - count: "5" + - "0" + - "0" + count: "11" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + - 16.384 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: endpoint_slice_mirroring_controller_endpoints_sync_duration + - description: Distribution of heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "11087" + - "272841" + - "157529" + - "107198" + - "54116" + - "17250" + - "5953" + - "1962" + - "1771" + - "389" + - "255" + - "186" + count: "630537" + explicitBounds: + - 8.999999999999998 + - 24.999999999999996 + - 64.99999999999999 + - 144.99999999999997 + - 320.99999999999994 + - 704.9999999999999 + - 1536.9999999999998 + - 3200.9999999999995 + - 6528.999999999999 + - 13568.999999999998 + - 27264.999999999996 + startTimeUnixNano: "1000000" + sum: 8.3264944e+07 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: go_gc_heap_allocs_by_size_bytes_total + - description: '[ALPHA] How long in seconds an item stays in workqueue before being requested.' + histogram: + aggregationTemporality: 2 + dataPoints: + - attributes: + - key: name + value: + stringValue: ClusterRoleAggregator + bucketCounts: + - "0" + - "0" + - "1" + - "17" + - "6" + - "21" + - "10" + - "5" + - "0" + - "0" + - "0" + count: "60" + explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 + startTimeUnixNano: "1000000" + sum: 0.33132451600000035 + timeUnixNano: "1000000" + - attributes: + - key: name + value: + stringValue: DynamicCABundle-client-ca-bundle + bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "1" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "1" + explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 + startTimeUnixNano: "1000000" + sum: 0.000447509 + timeUnixNano: "1000000" + - attributes: + - key: name + value: + stringValue: DynamicCABundle-csr-controller + bucketCounts: + - "0" + - "0" + - "0" + - "1" + - "5" + - "1" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "7" + explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 - 0.001 - 
0.01 - 0.1 - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000693797 + sum: 0.000410302 timeUnixNano: "1000000" - attributes: - key: name @@ -551,7 +843,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 2.111e-05 + sum: 5.4803e-05 timeUnixNano: "1000000" - attributes: - key: name @@ -561,8 +853,8 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "3" + - "2" + - "1" - "0" - "0" - "0" @@ -582,7 +874,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 4.286e-05 + sum: 5.5233000000000005e-05 timeUnixNano: "1000000" - attributes: - key: name @@ -593,13 +885,13 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" + - "1" - "0" - "0" - - "1" - "0" - "0" + - "1" count: "2" explicitBounds: - 1e-08 @@ -613,7 +905,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.653536928 + sum: 13.201058558 timeUnixNano: "1000000" - attributes: - key: name @@ -623,12 +915,12 @@ resourceMetrics: - "0" - "0" - "0" - - "25" - - "1" - - "2" - - "1" + - "19" + - "0" - "1" - "0" + - "10" + - "0" - "0" - "0" count: "30" @@ -644,7 +936,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.025223884999999998 + sum: 0.6612735279999998 timeUnixNano: "1000000" - attributes: - key: name @@ -711,18 +1003,18 @@ resourceMetrics: value: stringValue: daemonset bucketCounts: - - "0" - - "0" - "0" - "0" - "0" - "10" - - "3" + - "1" + - "1" + - "2" - "2" - "2" - "0" - "0" - count: "17" + count: "18" explicitBounds: - 1e-08 - 1e-07 @@ -735,7 +1027,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.3980634829999998 + sum: 0.27679710300000004 timeUnixNano: "1000000" - attributes: - key: name @@ -745,12 +1037,12 @@ resourceMetrics: - "0" - "0" - "0" + - "16" + - "3" - "0" + - "7" + - "11" - "0" - - "17" - - "12" - - "6" - - "2" - "0" - "0" count: "37" @@ -766,7 +1058,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.5891403840000007 + sum: 0.212220942 timeUnixNano: "1000000" - attributes: - key: name @@ -836,15 
+1128,15 @@ resourceMetrics: - "0" - "0" - "0" + - "7" + - "3" - "1" - - "5" - "0" - - "8" - - "1" - - "1" + - "2" - "0" - "0" - count: "16" + - "0" + count: "13" explicitBounds: - 1e-08 - 1e-07 @@ -857,7 +1149,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.2739903800000001 + sum: 0.15044078299999997 timeUnixNano: "1000000" - attributes: - key: name @@ -867,15 +1159,15 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "6" - - "4" - - "8" - - "1" + - "16" - "1" - "0" - "0" - count: "20" + - "2" + - "0" + - "0" + - "0" + count: "19" explicitBounds: - 1e-08 - 1e-07 @@ -888,7 +1180,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.7529733830000002 + sum: 0.05013861399999999 timeUnixNano: "1000000" - attributes: - key: name @@ -898,11 +1190,11 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "11" + - "10" - "0" - "0" - "0" + - "1" - "0" - "0" - "0" @@ -919,7 +1211,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000218359 + sum: 0.07505002100000001 timeUnixNano: "1000000" - attributes: - key: name @@ -959,15 +1251,15 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - "0" - "0" + - "3" - "0" - "0" - count: "1" + count: "3" explicitBounds: - 1e-08 - 1e-07 @@ -980,7 +1272,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 7.634e-06 + sum: 1.9690926960000001 timeUnixNano: "1000000" - attributes: - key: name @@ -1019,16 +1311,16 @@ resourceMetrics: bucketCounts: - "0" - "0" - - "20" - - "530" - - "69" - - "1" + - "4" + - "447" + - "200" + - "2" - "1" - "0" - "0" - "0" - "0" - count: "621" + count: "654" explicitBounds: - 1e-08 - 1e-07 @@ -1041,7 +1333,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.005338852999999999 + sum: 0.013277779000000005 timeUnixNano: "1000000" - attributes: - key: name @@ -1171,15 +1463,15 @@ resourceMetrics: - "0" - "0" - "0" - - "7" + - "5" - "1" - "0" - - "1" - "0" + - "1" - "0" - "0" - "0" - count: "9" + count: "7" explicitBounds: - 
1e-08 - 1e-07 @@ -1192,7 +1484,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.007747187 + sum: 0.04661130600000001 timeUnixNano: "1000000" - attributes: - key: name @@ -1202,11 +1494,11 @@ resourceMetrics: - "0" - "0" - "0" - - "2" - - "11" - - "0" - "0" + - "5" + - "4" - "0" + - "4" - "0" - "0" - "0" @@ -1223,7 +1515,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000312981 + sum: 0.337031921 timeUnixNano: "1000000" - attributes: - key: name @@ -1234,14 +1526,14 @@ resourceMetrics: - "0" - "0" - "0" - - "2" - - "1" - "0" - "0" - "0" + - "1" + - "0" - "0" - "0" - count: "3" + count: "1" explicitBounds: - 1e-08 - 1e-07 @@ -1254,7 +1546,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000299154 + sum: 0.050201209 timeUnixNano: "1000000" - attributes: - key: name @@ -1264,11 +1556,11 @@ resourceMetrics: - "0" - "0" - "0" - - "17" - - "1" - - "4" + - "15" + - "0" - "0" - "0" + - "7" - "0" - "0" - "0" @@ -1285,7 +1577,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.001146226 + sum: 0.47183939 timeUnixNano: "1000000" - attributes: - key: name @@ -1415,15 +1707,15 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "25" - - "1" - - "11" + - "21" - "4" - - "2" + - "5" + - "5" + - "5" - "0" - "0" - count: "43" + - "0" + count: "40" explicitBounds: - 1e-08 - 1e-07 @@ -1436,7 +1728,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.5348625370000002 + sum: 0.09813436600000003 timeUnixNano: "1000000" - attributes: - key: name @@ -1566,12 +1858,12 @@ resourceMetrics: - "0" - "0" - "0" + - "1" - "0" - "0" - "0" - - "5" - - "0" - "1" + - "4" - "0" - "0" count: "6" @@ -1587,7 +1879,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.37614404599999995 + sum: 0.49406862700000004 timeUnixNano: "1000000" - attributes: - key: name @@ -1627,12 +1919,12 @@ resourceMetrics: - "0" - "0" - "0" + - "1" - "0" - "0" - "0" - "5" - "0" - - "1" - "0" - "0" count: "6" @@ -1648,7 
+1940,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.5661856769999999 + sum: 0.147684645 timeUnixNano: "1000000" - attributes: - key: name @@ -1688,11 +1980,11 @@ resourceMetrics: - "0" - "0" - "0" - - "43" - - "3" - - "0" + - "32" + - "1" - "0" - "0" + - "13" - "0" - "0" - "0" @@ -1709,7 +2001,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.0002596959999999999 + sum: 0.8889365320000002 timeUnixNano: "1000000" - attributes: - key: name @@ -1780,8 +2072,8 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" + - "1" - "0" - "0" - "0" @@ -1800,7 +2092,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 3.5476e-05 + sum: 0.00028752 timeUnixNano: "1000000" - attributes: - key: name @@ -1841,14 +2133,14 @@ resourceMetrics: - "0" - "0" - "4" - - "2" - "0" - "1" - "0" - "1" - "0" - "0" - count: "8" + - "0" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -1861,7 +2153,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.465505962 + sum: 0.076594883 timeUnixNano: "1000000" - attributes: - key: name @@ -1927,52 +2219,48 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: workqueue_work_duration_seconds - - description: '[ALPHA] Request latency in seconds. Broken down by status code.' + name: workqueue_queue_duration_seconds + - description: Distribution individual GC-related stop-the-world pause latencies. 
histogram: aggregationTemporality: 2 dataPoints: - - attributes: - - key: code - value: - stringValue: "201" - bucketCounts: - - "1" + - bucketCounts: - "0" - "0" - "0" - "0" - "0" + - "9" + - "13" + - "4" - "0" - "0" - "0" - count: "1" + count: "26" explicitBounds: - - 0.25 - - 0.5 - - 0.7 - - 1 - - 1.5 - - 3 - - 5 - - 10 + - -5e-324 + - 9.999999999999999e-10 + - 9.999999999999999e-09 + - 9.999999999999998e-08 + - 1.0239999999999999e-06 + - 1.0239999999999999e-05 + - 0.00010239999999999998 + - 0.0010485759999999998 + - 0.010485759999999998 + - 0.10485759999999998 startTimeUnixNano: "1000000" - sum: 0.001235711 + sum: NaN timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: apiserver_delegated_authz_request_duration_seconds - - description: '[ALPHA] Number of endpoints removed on each Service sync' + name: go_gc_pauses_seconds_total + - description: '[ALPHA] The ratio of chosen deleted pod''s ages to the current youngest pod''s age (at the time). Should be <2.The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate''s effect onthe sorting (and deletion) of pods when a replicaset scales down. This only considers Ready pods when calculating and reporting.' 
histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - - "19" - - "0" - - "0" - - "0" - "0" - "0" - "0" @@ -1980,28 +2268,43 @@ resourceMetrics: - "0" - "0" - "0" + explicitBounds: + - 0.25 + - 0.5 + - 1 + - 2 + - 4 + - 8 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: replicaset_controller_sorting_deletion_age_ratio + - description: '[ALPHA] Number of endpoints added on each Service sync' + histogram: + aggregationTemporality: 2 + dataPoints: + - attributes: + - key: clusterCIDR + value: + stringValue: 10.244.0.0/16 + bucketCounts: + - "1" - "0" - "0" - "0" - "0" - "0" - count: "19" + count: "1" explicitBounds: - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 - - 32768 + - 1 + - 5 + - 25 + - 125 + - 625 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -2009,7 +2312,7 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: endpoint_slice_controller_endpoints_removed_per_sync + name: node_ipam_controller_cidrset_allocation_tries_per_request - description: '[ALPHA] Request latency in seconds. Broken down by status code.' histogram: aggregationTemporality: 2 @@ -2039,69 +2342,66 @@ resourceMetrics: - 5 - 10 startTimeUnixNano: "1000000" - sum: 0.005255388 + sum: 0.005669137 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram name: apiserver_delegated_authn_request_duration_seconds - - description: '[ALPHA] A metric measuring the latency for updating each load balancer hosts.' + - description: '[ALPHA] Request latency in seconds. Broken down by verb, and host.' 
histogram: aggregationTemporality: 2 dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" + - attributes: + - key: host + value: + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: GET + bucketCounts: + - "428" + - "150" + - "47" + - "5" + - "6" + - "2" - "0" + - "1" - "0" - "0" - "0" - "0" - "0" + count: "639" explicitBounds: + - 0.005 + - 0.025 + - 0.1 + - 0.25 + - 0.5 - 1 - 2 - 4 - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 12.581361487000006 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: service_controller_update_loadbalancer_host_latency_seconds - - description: '[ALPHA] A metric measuring the latency for nodesync which updates loadbalancer hosts on cluster node updates.' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" + - attributes: + - key: host + value: + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PATCH + bucketCounts: + - "2" + - "11" + - "3" - "0" - "0" - "0" @@ -2112,256 +2412,303 @@ resourceMetrics: - "0" - "0" - "0" + count: "16" explicitBounds: + - 0.005 + - 0.025 + - 0.1 + - 0.25 + - 0.5 - 1 - 2 - 4 - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.218911124 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: service_controller_nodesync_latency_seconds - - description: '[ALPHA] How long in seconds an item stays in workqueue before being requested.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: name + - key: host value: - stringValue: ClusterRoleAggregator + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: POST bucketCounts: + - "47" + - "33" + - "18" + - "3" + - "0" - "0" - "0" - "0" - - "20" - - "9" - - "20" - - "10" - - "3" - "0" - "0" - "0" - count: "62" + - "0" + - "0" + count: "101" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0.09050835599999994 + sum: 1.727318864 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: DynamicCABundle-client-ca-bundle + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PUT bucketCounts: + - "86" + - "62" + - "1" - "0" - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - "0" - "0" - "0" - count: "1" + count: "149" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 4.4363e-05 + sum: 0.919322586 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: DynamicCABundle-csr-controller - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "2" - - "1" - - "0" - - "2" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: rest_client_request_duration_seconds + - description: Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: - "0" + - "1242" + - "0" + - "808" + - "2109" + - "3051" + - "1244" + - "311" + - "33" - "0" - "0" - count: "5" + count: "8798" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - -5e-324 + - 9.999999999999999e-10 + - 9.999999999999999e-09 + - 9.999999999999998e-08 + - 1.0239999999999999e-06 + - 1.0239999999999999e-05 + - 0.00010239999999999998 + - 0.0010485759999999998 + - 0.010485759999999998 + - 0.10485759999999998 startTimeUnixNano: "1000000" - sum: 0.025584968 + sum: NaN timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: go_sched_latencies_seconds + - description: '[ALPHA] Request size in bytes. Broken down by verb and host.' + histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: name + - key: host value: - stringValue: DynamicCABundle-request-header + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: GET bucketCounts: + - "639" - "0" - "0" - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - "0" - "0" - count: "1" + - "0" + count: "639" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 0.000175388 + sum: 0 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: DynamicServingCertificateController + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PATCH bucketCounts: - - "0" - - "0" - - "0" + - "1" - "2" - "1" - "0" + - "8" + - "4" - "0" - "0" - "0" - "0" - "0" - count: "3" + - "0" + count: "16" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 
- - 10 + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 3.4063e-05 + sum: 41067 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: bootstrap_signer_queue + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: POST bucketCounts: + - "1" + - "54" + - "19" + - "7" + - "19" + - "1" - "0" - "0" - "0" - "0" - "0" - - "1" - - "0" - - "0" - - "1" - - "0" - "0" - count: "2" + count: "101" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 0.20089047799999998 + sum: 49870 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: certificate + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PUT bucketCounts: - "0" - "0" + - "79" + - "10" + - "38" + - "22" - "0" - - "19" - - "1" - "0" - "0" - - "10" - "0" - "0" - "0" - count: "30" + count: "149" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 0.4955435670000001 + sum: 285137 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: rest_client_request_size_bytes + - description: '[ALPHA] ' + histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: name + - key: status value: - stringValue: claims + stringValue: miss bucketCounts: + - "1" - "0" - "0" - "0" @@ -2373,25 +2720,34 @@ resourceMetrics: - "0" - "0" - "0" + count: "1" explicitBounds: - - 1e-08 - - 1e-07 - - 
1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 + - 0.005 - 0.01 + - 0.025 + - 0.05 - 0.1 + - 0.25 + - 0.5 - 1 + - 2.5 + - 5 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.005 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: cronjob - bucketCounts: + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: authentication_token_cache_request_duration_seconds + - description: '[ALPHA] Number of endpoints added on each Service sync' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "18" + - "0" - "0" - "0" - "0" @@ -2403,37 +2759,55 @@ resourceMetrics: - "0" - "0" - "0" + - "0" + - "0" + - "0" + count: "18" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 + - 32768 startTimeUnixNano: "1000000" - sum: 0 + sum: 4 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: endpoint_slice_controller_endpoints_added_per_sync + - description: '[ALPHA] How long in seconds processing an item from workqueue takes.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - key: name value: - stringValue: daemonset + stringValue: ClusterRoleAggregator bucketCounts: - "0" - "0" - "0" - - "7" - - "2" - - "2" - - "3" + - "0" - "1" - - "2" + - "39" + - "13" + - "7" + - "0" - "0" - "0" - count: "17" + count: "60" explicitBounds: - 1e-08 - 1e-07 @@ -2446,25 +2820,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.404680016 + sum: 0.22590471300000006 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: deployment + stringValue: DynamicCABundle-client-ca-bundle bucketCounts: - "0" - "0" - "0" - - "20" - - "5" - - "3" - - "4" - - "5" + - "0" + - "1" - "0" - "0" - "0" - count: "37" + - "0" + - "0" + - "0" + count: "1" explicitBounds: - 1e-08 - 1e-07 @@ -2477,24 +2851,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.28344076399999996 + sum: 2.6189e-05 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: disruption + stringValue: DynamicCABundle-csr-controller bucketCounts: - "0" - "0" - "0" - "0" - "0" + - "7" - "0" - "0" - "0" - "0" - "0" - - "0" + count: "7" explicitBounds: - 1e-08 - 1e-07 @@ -2507,24 +2882,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.0012221329999999998 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: disruption_recheck + stringValue: DynamicCABundle-request-header bucketCounts: - "0" - "0" - "0" - "0" + - "1" - "0" - "0" - "0" - "0" - "0" - "0" - - "0" + count: "1" explicitBounds: - 1e-08 - 1e-07 @@ -2537,25 +2913,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 3.3934e-05 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: endpoint + stringValue: DynamicServingCertificateController bucketCounts: - "0" - "0" - "0" - - "13" - "1" + - "2" - "0" - "0" - - "2" - "0" - "0" - "0" - count: "16" + - "0" + count: "3" explicitBounds: - 1e-08 - 1e-07 @@ -2568,25 +2944,25 @@ 
resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.048352159 + sum: 4.3412e-05 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: endpoint_slice + stringValue: bootstrap_signer_queue bucketCounts: - "0" - "0" - "0" - - "18" - "0" - "0" + - "1" - "0" - - "2" + - "1" - "0" - "0" - "0" - count: "20" + count: "2" explicitBounds: - 1e-08 - 1e-07 @@ -2599,25 +2975,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.100636481 + sum: 0.015566043000000002 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: endpoint_slice_mirroring + stringValue: certificate bucketCounts: - "0" - "0" - "0" - - "10" - - "0" - - "0" - - "0" + - "22" + - "4" + - "2" + - "1" - "1" - "0" - "0" - "0" - count: "11" + count: "30" explicitBounds: - 1e-08 - 1e-07 @@ -2630,12 +3006,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.04809879199999999 + sum: 0.018928732000000004 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: ephemeral_volume + stringValue: claims bucketCounts: - "0" - "0" @@ -2665,7 +3041,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: garbage_collector_attempt_to_delete + stringValue: cronjob bucketCounts: - "0" - "0" @@ -2675,10 +3051,9 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" - "0" - count: "1" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2691,24 +3066,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.637868367 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: garbage_collector_attempt_to_orphan + stringValue: daemonset bucketCounts: - "0" - "0" - "0" - "0" - "0" + - "9" + - "5" + - "4" - "0" - "0" - "0" - - "0" - - "0" - - "0" + count: "18" explicitBounds: - 1e-08 - 1e-07 @@ -2721,25 +3097,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.126134899 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: garbage_collector_graph_changes + 
stringValue: deployment bucketCounts: - "0" - "0" - - "5" - - "440" - - "147" - - "29" - "0" - "0" - "0" + - "11" + - "10" + - "16" + - "0" - "0" - "0" - count: "621" + count: "37" explicitBounds: - 1e-08 - 1e-07 @@ -2752,12 +3128,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.010479588000000003 + sum: 0.36744057799999996 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: horizontalpodautoscaler + stringValue: disruption bucketCounts: - "0" - "0" @@ -2787,7 +3163,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: job + stringValue: disruption_recheck bucketCounts: - "0" - "0" @@ -2817,19 +3193,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: job_orphan_pod + stringValue: endpoint bucketCounts: - "0" - "0" - "0" - "0" + - "3" - "0" + - "10" - "0" - "0" - "0" - "0" - - "0" - - "0" + count: "13" explicitBounds: - 1e-08 - 1e-07 @@ -2842,24 +3219,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.04771414599999999 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: namespace + stringValue: endpoint_slice bucketCounts: - "0" - "0" - "0" - "0" + - "2" + - "7" + - "10" - "0" - "0" - "0" - "0" - - "0" - - "0" - - "0" + count: "19" explicitBounds: - 1e-08 - 1e-07 @@ -2872,25 +3250,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.051048317 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: node_lifecycle_controller + stringValue: endpoint_slice_mirroring bucketCounts: - "0" - "0" - "0" - - "6" - - "1" - - "1" - "0" - - "1" + - "11" + - "0" - "0" - "0" - "0" - count: "9" + - "0" + - "0" + count: "11" explicitBounds: - 1e-08 - 1e-07 @@ -2903,25 +3281,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.05540121699999999 + sum: 0.000234172 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: node_lifecycle_controller_pods + stringValue: ephemeral_volume 
bucketCounts: - "0" - "0" - "0" - - "1" - - "1" - - "7" - "0" - - "4" - "0" - "0" - "0" - count: "13" + - "0" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2934,21 +3311,21 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.22932522000000002 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: noexec_taint_node + stringValue: garbage_collector_attempt_to_delete bucketCounts: - "0" - "0" - "0" - - "2" + - "1" - "0" - "0" - "0" - - "1" + - "2" - "0" - "0" - "0" @@ -2965,25 +3342,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.055234383 + sum: 0.022924099 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: noexec_taint_pod + stringValue: garbage_collector_attempt_to_orphan bucketCounts: - "0" - "0" - "0" - - "17" - - "1" - "0" - "0" - - "4" - "0" - "0" - "0" - count: "22" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2996,24 +3372,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.22827746899999996 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: orphaned_pods_nodes + stringValue: garbage_collector_graph_changes bucketCounts: - "0" - "0" + - "3" + - "564" + - "82" + - "3" + - "2" - "0" - "0" - "0" - "0" - - "0" - - "0" - - "0" - - "0" - - "0" + count: "654" explicitBounds: - 1e-08 - 1e-07 @@ -3026,12 +3403,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.007651735000000003 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: pvcprotection + stringValue: horizontalpodautoscaler bucketCounts: - "0" - "0" @@ -3061,7 +3438,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: pvcs + stringValue: job bucketCounts: - "0" - "0" @@ -3091,7 +3468,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: pvprotection + stringValue: job_orphan_pod bucketCounts: - "0" - "0" @@ -3121,20 +3498,19 @@ resourceMetrics: - attributes: - key: name 
value: - stringValue: replicaset + stringValue: namespace bucketCounts: - "0" - "0" - "0" - - "30" - - "1" - - "1" - - "10" - - "1" - "0" - "0" - "0" - count: "43" + - "0" + - "0" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -3147,24 +3523,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.07417278899999998 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: replicationmanager + stringValue: node_lifecycle_controller bucketCounts: - "0" - "0" - "0" + - "5" + - "1" - "0" + - "1" - "0" - "0" - "0" - "0" - - "0" - - "0" - - "0" + count: "7" explicitBounds: - 1e-08 - 1e-07 @@ -3177,24 +3554,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.005965626 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: resource_quota_controller_resource_changes + stringValue: node_lifecycle_controller_pods bucketCounts: - "0" - "0" - "0" + - "3" + - "8" + - "2" - "0" - "0" - "0" - "0" - "0" - - "0" - - "0" - - "0" + count: "13" explicitBounds: - 1e-08 - 1e-07 @@ -3207,24 +3585,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.0013295120000000001 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: resourcequota_primary + stringValue: noexec_taint_node bucketCounts: - "0" - "0" - "0" - "0" + - "1" - "0" - "0" - "0" - "0" - "0" - "0" - - "0" + count: "1" explicitBounds: - 1e-08 - 1e-07 @@ -3237,24 +3616,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 2.4707e-05 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: resourcequota_priority + stringValue: noexec_taint_pod bucketCounts: - "0" - "0" - "0" + - "16" + - "4" + - "2" - "0" - "0" - "0" - "0" - "0" - - "0" - - "0" - - "0" + count: "22" explicitBounds: - 1e-08 - 1e-07 @@ -3267,25 +3647,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.000450435 timeUnixNano: "1000000" - attributes: - key: name value: - 
stringValue: root_ca_cert_publisher + stringValue: orphaned_pods_nodes bucketCounts: - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - - "1" - - "4" - "0" - "0" - count: "6" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -3298,12 +3677,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.618493263 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: service + stringValue: pvcprotection bucketCounts: - "0" - "0" @@ -3333,20 +3712,19 @@ resourceMetrics: - attributes: - key: name value: - stringValue: serviceaccount + stringValue: pvcs bucketCounts: - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - - "1" - - "4" - "0" - "0" - count: "6" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -3359,12 +3737,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 2.418200572 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: serviceaccount_tokens_secret + stringValue: pvprotection bucketCounts: - "0" - "0" @@ -3394,20 +3772,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: serviceaccount_tokens_service + stringValue: replicaset bucketCounts: - "0" - "0" - "0" - - "32" - - "4" - - "0" - "0" - - "10" + - "17" + - "3" + - "9" + - "11" - "0" - "0" - "0" - count: "46" + count: "40" explicitBounds: - 1e-08 - 1e-07 @@ -3420,12 +3798,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.7150192629999997 + sum: 0.27846307500000006 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: stale_pod_disruption + stringValue: replicationmanager bucketCounts: - "0" - "0" @@ -3455,7 +3833,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: statefulset + stringValue: resource_quota_controller_resource_changes bucketCounts: - "0" - "0" @@ -3485,20 +3863,19 @@ resourceMetrics: - attributes: - key: name value: - stringValue: token_cleaner + stringValue: resourcequota_primary bucketCounts: - "0" - "0" - "0" - "0" - "0" - - "1" - 
"0" - "0" - "0" - "0" - "0" - count: "1" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -3511,12 +3888,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000231314 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: ttl_jobs_to_delete + stringValue: resourcequota_priority bucketCounts: - "0" - "0" @@ -3546,20 +3923,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: ttlcontroller + stringValue: root_ca_cert_publisher bucketCounts: - "0" - "0" - "0" - - "4" - - "1" - "0" + - "0" + - "0" + - "5" - "1" - - "1" - - "1" - "0" - "0" - count: "8" + - "0" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -3572,12 +3949,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.46869643099999997 + sum: 0.05281598 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: volume_expand + stringValue: service bucketCounts: - "0" - "0" @@ -3607,7 +3984,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: volumes + stringValue: serviceaccount bucketCounts: - "0" - "0" @@ -3615,11 +3992,12 @@ resourceMetrics: - "0" - "0" - "0" + - "6" - "0" - "0" - "0" - "0" - - "0" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -3632,23 +4010,13 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.032349709 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: workqueue_queue_duration_seconds - - description: '[ALPHA] ' - histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: status + - key: name value: - stringValue: miss + stringValue: serviceaccount_tokens_secret bucketCounts: - - "1" - "0" - "0" - "0" @@ -3660,237 +4028,56 @@ resourceMetrics: - "0" - "0" - "0" - count: "1" explicitBounds: - - 0.005 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 - 0.01 - - 0.025 - - 0.05 - 0.1 - - 0.25 - - 0.5 - 1 - - 2.5 - - 5 - 10 startTimeUnixNano: "1000000" - sum: 
0.005 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: authentication_token_cache_request_duration_seconds - - description: Distribution of heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "10202" - - "273034" - - "151110" - - "104352" - - "50097" - - "16299" - - "5562" - - "1848" - - "1632" - - "339" - - "238" - - "184" - count: "614897" - explicitBounds: - - 8.999999999999998 - - 24.999999999999996 - - 64.99999999999999 - - 144.99999999999997 - - 320.99999999999994 - - 704.9999999999999 - - 1536.9999999999998 - - 3200.9999999999995 - - 6528.999999999999 - - 13568.999999999998 - - 27264.999999999996 - startTimeUnixNano: "1000000" - sum: 7.9027144e+07 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: go_gc_heap_allocs_by_size_bytes_total - - description: Distribution individual GC-related stop-the-world pause latencies. 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "6" - - "13" - - "5" - - "0" - - "0" - - "0" - count: "24" - explicitBounds: - - -5e-324 - - 9.999999999999999e-10 - - 9.999999999999999e-09 - - 9.999999999999998e-08 - - 1.0239999999999999e-06 - - 1.0239999999999999e-05 - - 0.00010239999999999998 - - 0.0010485759999999998 - - 0.010485759999999998 - - 0.10485759999999998 - startTimeUnixNano: "1000000" - sum: NaN - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: go_gc_pauses_seconds_total - - description: '[ALPHA] Number of endpoints added on each Service sync' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "19" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "19" - explicitBounds: - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 - - 32768 - startTimeUnixNano: "1000000" - sum: 4 + sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: endpoint_slice_controller_endpoints_added_per_sync - - description: '[ALPHA] Number of EndpointSlices changed on each Service sync' - histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: topology + - key: name value: - stringValue: Disabled + stringValue: serviceaccount_tokens_service bucketCounts: - - "9" - - "0" - - "0" - "0" - "0" - "0" + - "42" + - "3" + - "1" - "0" - - "10" - "0" - "0" - "0" - "0" - count: "19" + count: "46" explicitBounds: - - 0.005 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 - 0.01 - - 0.025 - - 0.05 - 0.1 - - 0.25 - - 0.5 - 1 - - 2.5 - - 5 - 10 startTimeUnixNano: "1000000" - sum: 10 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: 
endpoint_slice_controller_endpointslices_changed_per_sync - - description: '[ALPHA] Number of endpoints added on each Service sync' - histogram: - aggregationTemporality: 2 - dataPoints: - - attributes: - - key: clusterCIDR - value: - stringValue: 10.244.0.0/16 - bucketCounts: - - "1" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "1" - explicitBounds: - - 1 - - 5 - - 25 - - 125 - - 625 - startTimeUnixNano: "1000000" - sum: 0 + sum: 0.0004765730000000001 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: node_ipam_controller_cidrset_allocation_tries_per_request - - description: '[ALPHA] Client side rate limiter latency in seconds. Broken down by verb, and host.' - histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: GET + stringValue: stale_pod_disruption bucketCounts: - - "510" - - "1" - - "40" - - "7" - - "9" - - "2" - "0" - "0" - "0" @@ -3898,33 +4085,29 @@ resourceMetrics: - "0" - "0" - "0" - count: "569" + - "0" + - "0" + - "0" + - "0" explicitBounds: - - 0.005 - - 0.025 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.25 - - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 10 startTimeUnixNano: "1000000" - sum: 7.691382789000004 + sum: 0 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: PATCH + stringValue: statefulset bucketCounts: - - "18" - - "0" - "0" - "0" - "0" @@ -3936,70 +4119,56 @@ resourceMetrics: - "0" - "0" - "0" - count: "18" explicitBounds: - - 0.005 - - 0.025 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.25 - - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 10 startTimeUnixNano: "1000000" - sum: 2.2739000000000004e-05 + sum: 0 timeUnixNano: "1000000" 
- attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: POST + stringValue: token_cleaner bucketCounts: - - "78" - "0" - - "17" - - "2" - - "7" - "0" - "0" - "0" + - "1" + - "0" - "0" - "0" - "0" - "0" - "0" - count: "104" + count: "1" explicitBounds: - - 0.005 - - 0.025 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.25 - - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 10 startTimeUnixNano: "1000000" - sum: 3.7963923310000003 + sum: 3.3382e-05 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: PUT + stringValue: ttl_jobs_to_delete bucketCounts: - - "134" - "0" - "0" - "0" @@ -4011,183 +4180,159 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - count: "134" explicitBounds: - - 0.005 - - 0.025 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.25 - - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 10 startTimeUnixNano: "1000000" - sum: 0.00015812399999999997 + sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: rest_client_rate_limiter_duration_seconds - - description: '[ALPHA] Request size in bytes. Broken down by verb and host.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: GET + stringValue: ttlcontroller bucketCounts: - - "569" - - "0" - - "0" - "0" - "0" - "0" + - "5" - "0" - "0" - "0" + - "1" - "0" - "0" - "0" - count: "569" + count: "6" explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.016074594999999997 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: PATCH + stringValue: volume_expand bucketCounts: - - "1" - - "5" - - "1" - "0" - - "8" - - "3" - "0" - "0" - "0" - "0" - "0" - "0" - count: "18" + - "0" + - "0" + - "0" + - "0" explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 36566 + sum: 0 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: POST + stringValue: volumes bucketCounts: - - "1" - - "54" - - "22" - - "7" - - "19" - - "1" - "0" - "0" - "0" - "0" - "0" - "0" - count: "104" + - "0" + - "0" + - "0" + - "0" + - "0" explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 51227 + sum: 0 timeUnixNano: "1000000" + metadata: + - key: 
prometheus.type + value: + stringValue: histogram + name: workqueue_work_duration_seconds + - description: '[ALPHA] Number of EndpointSlices changed on each Service sync' + histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: topology value: - stringValue: PUT + stringValue: Disabled bucketCounts: + - "8" + - "0" + - "0" - "0" - "0" - - "66" - - "10" - - "39" - - "19" - "0" - "0" + - "10" - "0" - "0" - "0" - "0" - count: "134" + count: "18" explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 + - 0.005 + - 0.01 + - 0.025 + - 0.05 + - 0.1 + - 0.25 + - 0.5 + - 1 + - 2.5 + - 5 + - 10 startTimeUnixNano: "1000000" - sum: 264704 + sum: 10 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: rest_client_request_size_bytes - - description: '[ALPHA] The ratio of chosen deleted pod''s ages to the current youngest pod''s age (at the time). Should be <2.The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate''s effect onthe sorting (and deletion) of pods when a replicaset scales down. This only considers Ready pods when calculating and reporting.' + name: endpoint_slice_controller_endpointslices_changed_per_sync + - description: '[ALPHA] A metric measuring the latency for nodesync which updates loadbalancer hosts on cluster node updates.' 
histogram: aggregationTemporality: 2 dataPoints: @@ -4199,13 +4344,31 @@ resourceMetrics: - "0" - "0" - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" explicitBounds: - - 0.25 - - 0.5 - 1 - 2 - 4 - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -4213,8 +4376,8 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: replicaset_controller_sorting_deletion_age_ratio - - description: '[ALPHA] Histogram of the number of seconds the last auth exec plugin client certificate lived before being rotated. If auth exec plugin client certificates are unused, histogram will contain no data.' + name: service_controller_nodesync_latency_seconds + - description: '[ALPHA] The time it took to delete the job since it became eligible for deletion' histogram: aggregationTemporality: 2 dataPoints: @@ -4231,18 +4394,24 @@ resourceMetrics: - "0" - "0" - "0" + - "0" + - "0" + - "0" explicitBounds: - - 600 - - 1800 - - 3600 - - 14400 - - 86400 - - 604800 - - 2.592e+06 - - 7.776e+06 - - 1.5552e+07 - - 3.1104e+07 - - 1.24416e+08 + - 0.1 + - 0.2 + - 0.4 + - 0.8 + - 1.6 + - 3.2 + - 6.4 + - 12.8 + - 25.6 + - 51.2 + - 102.4 + - 204.8 + - 409.6 + - 819.2 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -4250,7 +4419,7 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: rest_client_exec_plugin_certificate_rotation_age + name: ttl_after_finished_controller_job_deletion_duration_seconds - description: '[ALPHA] Response size in bytes. Broken down by verb and host.' 
histogram: aggregationTemporality: 2 @@ -4263,19 +4432,19 @@ resourceMetrics: value: stringValue: GET bucketCounts: - - "33" - - "161" - - "264" - - "50" - - "50" - - "8" + - "34" + - "175" + - "304" + - "60" + - "54" + - "9" - "3" - "0" - "0" - "0" - "0" - "0" - count: "569" + count: "639" explicitBounds: - 64 - 256 @@ -4289,7 +4458,7 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 428569 + sum: 461304 timeUnixNano: "1000000" - attributes: - key: host @@ -4303,7 +4472,7 @@ resourceMetrics: - "0" - "0" - "1" - - "17" + - "15" - "0" - "0" - "0" @@ -4311,7 +4480,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "18" + count: "16" explicitBounds: - 64 - 256 @@ -4325,7 +4494,7 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 55810 + sum: 48706 timeUnixNano: "1000000" - attributes: - key: host @@ -4338,7 +4507,7 @@ resourceMetrics: - "0" - "38" - "1" - - "23" + - "20" - "40" - "2" - "0" @@ -4347,7 +4516,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "104" + count: "101" explicitBounds: - 64 - 256 @@ -4361,7 +4530,7 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 114430 + sum: 111899 timeUnixNano: "1000000" - attributes: - key: host @@ -4372,18 +4541,18 @@ resourceMetrics: stringValue: PUT bucketCounts: - "0" - - "3" - - "70" + - "2" + - "84" - "7" - "36" - - "18" + - "20" - "0" - "0" - "0" - "0" - "0" - "0" - count: "134" + count: "149" explicitBounds: - 64 - 256 @@ -4397,182 +4566,13 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 245108 + sum: 260452 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram name: rest_client_response_size_bytes - - description: '[ALPHA] Duration of syncEndpoints() in seconds' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "11" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - 
- "0" - - "0" - - "0" - count: "11" - explicitBounds: - - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - - 16.384 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: endpoint_slice_mirroring_controller_endpoints_sync_duration - - description: '[ALPHA] Time between when a cronjob is scheduled to be run, and when the corresponding job is created' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - explicitBounds: - - 1 - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: cronjob_controller_cronjob_job_creation_skew_duration_seconds - - description: '[ALPHA] Number of namespace syncs happened in root ca cert publisher.' - histogram: - aggregationTemporality: 2 - dataPoints: - - attributes: - - key: code - value: - stringValue: "200" - bucketCounts: - - "0" - - "2" - - "3" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "1" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "6" - explicitBounds: - - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - - 16.384 - startTimeUnixNano: "1000000" - sum: 0.376113188 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: root_ca_cert_publisher_sync_duration_seconds - - description: Distribution of freed heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "6726" - - "201722" - - "95787" - - "76047" - - "38433" - - "9859" - - "4096" - - "1398" - - "1454" - - "197" - - "44" - - "76" - count: "435839" - explicitBounds: - - 8.999999999999998 - - 24.999999999999996 - - 64.99999999999999 - - 144.99999999999997 - - 320.99999999999994 - - 704.9999999999999 - - 1536.9999999999998 - - 3200.9999999999995 - - 6528.999999999999 - - 13568.999999999998 - - 27264.999999999996 - startTimeUnixNano: "1000000" - sum: 5.2542216e+07 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: go_gc_heap_frees_by_size_bytes_total scope: - name: otelcol/prometheusreceiver - version: v0.105.0 + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver + version: v0.110.0 diff --git a/functional_tests/testdata_histogram/expected/v1.25/coredns_metrics.yaml b/functional_tests/testdata_histogram/expected/v1.25/coredns_metrics.yaml index b584a6281..63807871c 100644 --- a/functional_tests/testdata_histogram/expected/v1.25/coredns_metrics.yaml +++ b/functional_tests/testdata_histogram/expected/v1.25/coredns_metrics.yaml @@ -18,10 +18,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-787d4945fb-jfz8p + stringValue: coredns-565d847f94-dxx72 - key: k8s.pod.uid value: - stringValue: 485281d9-4cec-4fa4-9047-3746fa550353 + stringValue: 929b5afc-e030-4dc3-933a-5d806ddc074e - key: net.host.name value: stringValue: 10.244.0.4 @@ -49,7 +49,7 @@ resourceMetrics: schemaUrl: https://opentelemetry.io/schemas/1.6.1 scopeMetrics: - metrics: - - description: Size of the EDNS0 UDP buffer in bytes (64K for TCP) per zone and protocol. + - description: Size of the returned response in bytes. histogram: aggregationTemporality: 2 dataPoints: @@ -65,9 +65,9 @@ resourceMetrics: stringValue: . 
bucketCounts: - "0" - - "8" - - "1" - "0" + - "12" + - "3" - "0" - "0" - "0" @@ -79,7 +79,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "9" + count: "15" explicitBounds: - 0 - 100 @@ -96,80 +96,25 @@ resourceMetrics: - 48000 - 64000 startTimeUnixNano: "1000000" - sum: 653 + sum: 2409 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: coredns_dns_request_size_bytes - - description: Histogram of the time (in seconds) each request took per zone. - histogram: - aggregationTemporality: 2 - dataPoints: - - attributes: - - key: server - value: - stringValue: dns://:53 - - key: zone - value: - stringValue: . - bucketCounts: - - "5" - - "0" - - "0" - - "1" - - "0" - - "1" - - "1" - - "0" - - "1" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "9" - explicitBounds: - - 0.00025 - - 0.0005 - - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - startTimeUnixNano: "1000000" - sum: 0.080724527 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: coredns_dns_request_duration_seconds + name: coredns_dns_response_size_bytes - description: Histogram of the time (in seconds) each request took. histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - "0" - - "125" - - "0" + - "136" + - "3" - "0" - "0" - "0" - count: "125" + count: "139" explicitBounds: - 0.00025 - 0.0025 @@ -177,14 +122,14 @@ resourceMetrics: - 0.25 - 2.5 startTimeUnixNano: "1000000" - sum: 0.06260746900000001 + sum: 0.07822562700000006 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram name: coredns_health_request_duration_seconds - - description: Size of the returned response in bytes. + - description: Size of the EDNS0 UDP buffer in bytes (64K for TCP) per zone and protocol. 
histogram: aggregationTemporality: 2 dataPoints: @@ -200,9 +145,9 @@ resourceMetrics: stringValue: . bucketCounts: - "0" + - "12" + - "3" - "0" - - "8" - - "1" - "0" - "0" - "0" @@ -214,7 +159,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "9" + count: "15" explicitBounds: - 0 - 100 @@ -231,13 +176,68 @@ resourceMetrics: - 48000 - 64000 startTimeUnixNano: "1000000" - sum: 1407 + sum: 1123 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: coredns_dns_response_size_bytes + name: coredns_dns_request_size_bytes + - description: Histogram of the time (in seconds) each request took per zone. + histogram: + aggregationTemporality: 2 + dataPoints: + - attributes: + - key: server + value: + stringValue: dns://:53 + - key: zone + value: + stringValue: . + bucketCounts: + - "9" + - "0" + - "0" + - "0" + - "1" + - "3" + - "0" + - "1" + - "1" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "15" + explicitBounds: + - 0.00025 + - 0.0005 + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + startTimeUnixNano: "1000000" + sum: 0.08552541900000002 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: coredns_dns_request_duration_seconds - description: Histogram of the time each request took. 
histogram: aggregationTemporality: 2 @@ -253,6 +253,7 @@ resourceMetrics: - "0" - "0" - "0" + - "0" - "1" - "0" - "0" @@ -266,7 +267,6 @@ resourceMetrics: - "0" - "0" - "0" - - "0" count: "2" explicitBounds: - 0.00025 @@ -286,7 +286,7 @@ resourceMetrics: - 4.096 - 8.192 startTimeUnixNano: "1000000" - sum: 0.011487233 + sum: 0.02882215 timeUnixNano: "1000000" - attributes: - key: rcode @@ -301,7 +301,7 @@ resourceMetrics: - "0" - "0" - "0" - - "1" + - "3" - "0" - "0" - "1" @@ -313,7 +313,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "2" + count: "4" explicitBounds: - 0.00025 - 0.0005 @@ -332,7 +332,7 @@ resourceMetrics: - 4.096 - 8.192 startTimeUnixNano: "1000000" - sum: 0.06821759699999999 + sum: 0.055191014000000004 timeUnixNano: "1000000" metadata: - key: prometheus.type @@ -340,5 +340,5 @@ resourceMetrics: stringValue: histogram name: coredns_forward_request_duration_seconds scope: - name: otelcol/prometheusreceiver - version: v0.105.0 + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver + version: v0.110.0 diff --git a/functional_tests/testdata_histogram/expected/v1.26/controller_manager_metrics.yaml b/functional_tests/testdata_histogram/expected/v1.26/controller_manager_metrics.yaml index 5d3dd502e..7e0606328 100644 --- a/functional_tests/testdata_histogram/expected/v1.26/controller_manager_metrics.yaml +++ b/functional_tests/testdata_histogram/expected/v1.26/controller_manager_metrics.yaml @@ -21,7 +21,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 69d8d9af-3647-4c9e-aac3-b3fbb81b230e + stringValue: 2f4d20e4-c1ef-4426-9da1-fac1b31c7de2 - key: net.host.name value: stringValue: 172.18.0.2 @@ -49,7 +49,42 @@ resourceMetrics: schemaUrl: https://opentelemetry.io/schemas/1.6.1 scopeMetrics: - metrics: - - description: '[ALPHA] Histogram of the number of seconds the last auth exec plugin client certificate lived before being rotated. 
If auth exec plugin client certificates are unused, histogram will contain no data.' + - description: '[STABLE] Time between when a cronjob is scheduled to be run, and when the corresponding job is created' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: + - 1 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: cronjob_controller_job_creation_skew_duration_seconds + - description: '[ALPHA] Distribution of the remaining lifetime on the certificate used to authenticate a request.' histogram: aggregationTemporality: 2 dataPoints: @@ -66,18 +101,24 @@ resourceMetrics: - "0" - "0" - "0" + - "0" + - "0" + - "0" explicitBounds: - - 600 + - 0 - 1800 - 3600 - - 14400 + - 7200 + - 21600 + - 43200 - 86400 + - 172800 + - 345600 - 604800 - 2.592e+06 - 7.776e+06 - 1.5552e+07 - 3.1104e+07 - - 1.24416e+08 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -85,7 +126,139 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: rest_client_exec_plugin_certificate_rotation_age + name: apiserver_client_certificate_expiration_seconds + - description: '[ALPHA] Request latency in seconds. Broken down by status code.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - attributes: + - key: code + value: + stringValue: "201" + bucketCounts: + - "1" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "1" + explicitBounds: + - 0.25 + - 0.5 + - 0.7 + - 1 + - 1.5 + - 3 + - 5 + - 10 + startTimeUnixNano: "1000000" + sum: 0.001020628 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: apiserver_delegated_authz_request_duration_seconds + - description: '[ALPHA] Authentication duration in seconds broken out by result.' + histogram: + aggregationTemporality: 2 + dataPoints: + - attributes: + - key: result + value: + stringValue: success + bucketCounts: + - "16" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "16" + explicitBounds: + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + - 16.384 + startTimeUnixNano: "1000000" + sum: 0.0004831130000000001 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: authentication_duration_seconds + - description: '[ALPHA] Number of endpoints added on each Service sync' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "20" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "20" + explicitBounds: + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 + - 32768 + startTimeUnixNano: "1000000" + sum: 4 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: endpoint_slice_controller_endpoints_added_per_sync - description: '[ALPHA] How long in seconds an item stays in workqueue before being requested.' 
histogram: aggregationTemporality: 2 @@ -98,15 +271,15 @@ resourceMetrics: - "0" - "0" - "0" - - "17" - - "15" - - "15" - - "12" - - "1" + - "20" + - "11" + - "23" + - "10" + - "3" - "0" - "0" - "0" - count: "60" + count: "67" explicitBounds: - 1e-08 - 1e-07 @@ -119,7 +292,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.052272098999999995 + sum: 0.1681133580000001 timeUnixNano: "1000000" - attributes: - key: name @@ -150,7 +323,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 4.1481e-05 + sum: 4.9092e-05 timeUnixNano: "1000000" - attributes: - key: name @@ -160,15 +333,15 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "2" + - "1" + - "3" - "2" - "0" - "0" - "0" - "0" - "0" - count: "4" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -181,7 +354,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.0006170230000000001 + sum: 0.0008189629999999999 timeUnixNano: "1000000" - attributes: - key: name @@ -191,15 +364,15 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - "1" - "0" + - "1" - "0" - "0" - "0" - "0" - "0" - count: "1" + count: "2" explicitBounds: - 1e-08 - 1e-07 @@ -212,7 +385,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 6.3649e-05 + sum: 0.000585836 timeUnixNano: "1000000" - attributes: - key: name @@ -222,9 +395,9 @@ resourceMetrics: - "0" - "0" - "0" - - "2" - "1" - - "0" + - "1" + - "1" - "0" - "0" - "0" @@ -243,7 +416,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 4.0286e-05 + sum: 0.000343091 timeUnixNano: "1000000" - attributes: - key: name @@ -258,9 +431,9 @@ resourceMetrics: - "1" - "0" - "0" - - "0" - "1" - "0" + - "0" count: "2" explicitBounds: - 1e-08 @@ -274,7 +447,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 3.200590322 + sum: 0.101109353 timeUnixNano: "1000000" - attributes: - key: name @@ -284,9 +457,9 @@ resourceMetrics: - "0" - "0" - "0" - - "20" - - "0" + - "18" - "0" + - "2" - "0" - "10" - "0" @@ -305,7 +478,7 @@ 
resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.33200277899999997 + sum: 0.4025693860000001 timeUnixNano: "1000000" - attributes: - key: name @@ -375,15 +548,15 @@ resourceMetrics: - "0" - "0" - "0" - - "9" - - "1" - - "0" + - "7" - "0" - "4" - - "2" + - "1" + - "5" - "0" - "0" - count: "16" + - "0" + count: "17" explicitBounds: - 1e-08 - 1e-07 @@ -396,7 +569,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.28289824699999994 + sum: 0.23423013500000006 timeUnixNano: "1000000" - attributes: - key: name @@ -406,15 +579,15 @@ resourceMetrics: - "0" - "0" - "0" - - "16" - - "9" - - "4" - - "11" - - "4" + - "19" + - "5" + - "6" + - "7" + - "6" - "0" - "0" - "0" - count: "44" + count: "43" explicitBounds: - 1e-08 - 1e-07 @@ -427,7 +600,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.185662992 + sum: 0.24982244699999995 timeUnixNano: "1000000" - attributes: - key: name @@ -497,14 +670,14 @@ resourceMetrics: - "0" - "0" - "0" - - "11" - - "2" + - "13" - "0" - "0" - "0" - "2" - "0" - "0" + - "0" count: "15" explicitBounds: - 1e-08 @@ -518,7 +691,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.24652175700000004 + sum: 0.084813069 timeUnixNano: "1000000" - attributes: - key: name @@ -529,14 +702,14 @@ resourceMetrics: - "0" - "0" - "18" - - "2" - - "0" + - "1" - "0" - "0" - "2" - "0" - "0" - count: "22" + - "0" + count: "21" explicitBounds: - 1e-08 - 1e-07 @@ -549,7 +722,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.24564112399999996 + sum: 0.08402123200000002 timeUnixNano: "1000000" - attributes: - key: name @@ -559,8 +732,8 @@ resourceMetrics: - "0" - "0" - "0" - - "10" - - "0" + - "9" + - "1" - "0" - "0" - "1" @@ -580,7 +753,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.073550077 + sum: 0.056064655000000005 timeUnixNano: "1000000" - attributes: - key: name @@ -625,10 +798,9 @@ resourceMetrics: - "0" - "0" - "0" - - "2" - "0" - "0" - count: "2" + - "0" 
explicitBounds: - 1e-08 - 1e-07 @@ -641,7 +813,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.1948869439999998 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name @@ -680,16 +852,16 @@ resourceMetrics: bucketCounts: - "0" - "0" - - "9" - - "432" - - "78" - - "58" - - "78" + - "4" + - "457" + - "110" + - "105" - "0" - "0" - "0" - "0" - count: "655" + - "0" + count: "676" explicitBounds: - 1e-08 - 1e-07 @@ -702,7 +874,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.2555220390000002 + sum: 0.025571551000000026 timeUnixNano: "1000000" - attributes: - key: name @@ -862,15 +1034,15 @@ resourceMetrics: - "0" - "0" - "0" - - "5" - - "0" + - "6" - "0" + - "1" - "0" - "0" - "1" - "0" - "0" - count: "6" + count: "8" explicitBounds: - 1e-08 - 1e-07 @@ -883,7 +1055,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.13338346099999998 + sum: 0.292895705 timeUnixNano: "1000000" - attributes: - key: name @@ -894,13 +1066,13 @@ resourceMetrics: - "0" - "0" - "0" - - "4" - - "5" + - "7" + - "1" + - "1" - "0" - "4" - "0" - "0" - - "0" count: "13" explicitBounds: - 1e-08 @@ -914,7 +1086,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.36096514599999996 + sum: 1.1130879219999998 timeUnixNano: "1000000" - attributes: - key: name @@ -924,7 +1096,7 @@ resourceMetrics: - "0" - "0" - "0" - - "0" + - "2" - "0" - "0" - "0" @@ -932,7 +1104,7 @@ resourceMetrics: - "1" - "0" - "0" - count: "1" + count: "3" explicitBounds: - 1e-08 - 1e-07 @@ -945,7 +1117,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.134269891 + sum: 0.295636477 timeUnixNano: "1000000" - attributes: - key: name @@ -955,12 +1127,12 @@ resourceMetrics: - "0" - "0" - "0" - - "14" - - "4" + - "17" + - "1" - "0" - "0" - - "4" - "0" + - "4" - "0" - "0" count: "22" @@ -976,7 +1148,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.3598526980000001 + sum: 1.120016969 timeUnixNano: "1000000" - attributes: - key: 
name @@ -1106,15 +1278,15 @@ resourceMetrics: - "0" - "0" - "0" - - "27" + - "25" - "2" - - "1" - - "3" - - "9" + - "6" + - "2" + - "6" - "0" - "0" - "0" - count: "42" + count: "41" explicitBounds: - 1e-08 - 1e-07 @@ -1127,7 +1299,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.148082956 + sum: 0.11294416299999999 timeUnixNano: "1000000" - attributes: - key: name @@ -1261,8 +1433,8 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "5" + - "1" + - "4" - "0" - "0" count: "6" @@ -1278,7 +1450,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 3.8367071170000004 + sum: 1.209531318 timeUnixNano: "1000000" - attributes: - key: name @@ -1339,7 +1511,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.384032475 + sum: 2.359901452 timeUnixNano: "1000000" - attributes: - key: name @@ -1380,8 +1552,8 @@ resourceMetrics: - "0" - "0" - "37" - - "3" - - "0" + - "2" + - "1" - "0" - "6" - "0" @@ -1400,7 +1572,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.5726738469999998 + sum: 0.5807982330000001 timeUnixNano: "1000000" - attributes: - key: name @@ -1470,9 +1642,9 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" - "0" + - "1" - "0" - "0" - "0" @@ -1491,7 +1663,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 4.328e-06 + sum: 0.000193524 timeUnixNano: "1000000" - attributes: - key: name @@ -1531,15 +1703,15 @@ resourceMetrics: - "0" - "0" - "0" - - "2" - - "2" - - "0" - - "0" + - "1" + - "3" + - "1" + - "1" - "1" - "1" - "0" - "0" - count: "6" + count: "8" explicitBounds: - 1e-08 - 1e-07 @@ -1552,7 +1724,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.427899916 + sum: 0.634484939 timeUnixNano: "1000000" - attributes: - key: name @@ -1619,89 +1791,50 @@ resourceMetrics: value: stringValue: histogram name: workqueue_queue_duration_seconds - - description: '[ALPHA] The time it took to delete the job since it became eligible for deletion' + - description: Distribution 
of heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" + - "11440" + - "290127" + - "164797" + - "111006" + - "52774" + - "17779" + - "5811" + - "1967" + - "1764" + - "374" + - "246" + - "186" + count: "658271" explicitBounds: - - 0.1 - - 0.2 - - 0.4 - - 0.8 - - 1.6 - - 3.2 - - 6.4 - - 12.8 - - 25.6 - - 51.2 - - 102.4 - - 204.8 - - 409.6 - - 819.2 + - 8.999999999999998 + - 24.999999999999996 + - 64.99999999999999 + - 144.99999999999997 + - 320.99999999999994 + - 704.9999999999999 + - 1536.9999999999998 + - 3200.9999999999995 + - 6528.999999999999 + - 13568.999999999998 + - 27264.999999999996 startTimeUnixNano: "1000000" - sum: 0 + sum: 8.407976e+07 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: ttl_after_finished_controller_job_deletion_duration_seconds - - description: Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. + name: go_gc_heap_allocs_by_size_bytes + - description: '[ALPHA] A metric measuring the latency for updating each load balancer hosts.' 
histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - - "1763" - "0" - - "509" - - "2443" - - "2682" - - "808" - - "326" - - "31" - - "0" - - "0" - count: "8562" - explicitBounds: - - 9.999999999999999e-10 - - 9.999999999999999e-09 - - 9.999999999999998e-08 - - 1.0239999999999999e-06 - - 1.0239999999999999e-05 - - 0.00010239999999999998 - - 0.0010485759999999998 - - 0.010485759999999998 - - 0.10485759999999998 - startTimeUnixNano: "1000000" - sum: 0.077157934 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: go_sched_latencies_seconds - - description: '[ALPHA] Number of endpoints added on each Service sync' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "21" - "0" - "0" - "0" @@ -1717,8 +1850,8 @@ resourceMetrics: - "0" - "0" - "0" - count: "21" explicitBounds: + - 1 - 2 - 4 - 8 @@ -1733,69 +1866,59 @@ resourceMetrics: - 4096 - 8192 - 16384 - - 32768 startTimeUnixNano: "1000000" - sum: 4 + sum: 0 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: endpoint_slice_controller_endpoints_added_per_sync - - description: '[ALPHA] Request latency in seconds. Broken down by verb, and host.' + name: service_controller_update_loadbalancer_host_latency_seconds + - description: '[ALPHA] Request latency in seconds. Broken down by status code.' 
histogram: aggregationTemporality: 2 dataPoints: - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: code value: - stringValue: GET + stringValue: "201" bucketCounts: - - "368" - - "176" - - "56" - - "11" - - "7" - - "2" - - "0" - "1" - "0" - "0" - "0" - "0" - "0" - count: "621" + - "0" + - "0" + - "0" + count: "1" explicitBounds: - - 0.005 - - 0.025 - - 0.1 - 0.25 - 0.5 + - 0.7 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 1.5 + - 3 + - 5 + - 10 startTimeUnixNano: "1000000" - sum: 12.865948227999985 + sum: 0.00563577 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: apiserver_delegated_authn_request_duration_seconds + - description: '[ALPHA] ' + histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: status value: - stringValue: PATCH + stringValue: miss bucketCounts: - - "5" - - "7" - - "3" - - "0" - "1" - "0" - "0" @@ -1805,73 +1928,47 @@ resourceMetrics: - "0" - "0" - "0" - count: "16" + - "0" + - "0" + - "0" + count: "1" explicitBounds: - 0.005 + - 0.01 - 0.025 + - 0.05 - 0.1 - 0.25 - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 2.5 + - 5 + - 10 startTimeUnixNano: "1000000" - sum: 0.7950531489999999 + sum: 0.005 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: authentication_token_cache_request_duration_seconds + - description: '[ALPHA] Number of namespace syncs happened in root ca cert publisher.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: code value: - stringValue: POST + stringValue: "200" bucketCounts: - - "45" - - "16" - - "26" - - "4" - - "9" - - "5" - - "0" - "0" - "0" + - "3" + - "2" - "0" - "0" - "0" - "0" - count: "105" - explicitBounds: - - 0.005 - - 0.025 - - 0.1 - - 0.25 - - 0.5 - - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 - startTimeUnixNano: "1000000" - sum: 8.324240457999995 - timeUnixNano: "1000000" - - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb - value: - stringValue: PUT - bucketCounts: - - "97" - - "53" - - "3" - - "1" - "0" - "1" - "0" @@ -1880,118 +1977,75 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - count: "155" + count: "6" explicitBounds: - - 0.005 - - 0.025 - - 0.1 - - 0.25 - - 0.5 - - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 - startTimeUnixNano: "1000000" - sum: 1.7793541769999996 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + - 16.384 + startTimeUnixNano: "1000000" + sum: 0.27878804 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: stringValue: histogram - name: rest_client_request_duration_seconds - - description: '[ALPHA] Request size in bytes. Broken down by verb and host.' + name: root_ca_cert_publisher_sync_duration_seconds + - description: '[ALPHA] How long in seconds processing an item from workqueue takes.' 
histogram: aggregationTemporality: 2 dataPoints: - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: GET + stringValue: ClusterRoleAggregator bucketCounts: - - "621" - - "0" - - "0" - - "0" - - "0" - - "0" - "0" - "0" - "0" - "0" + - "2" + - "47" + - "13" + - "2" + - "3" - "0" - "0" - count: "621" + count: "67" explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 2.0729024290000018 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: PATCH + stringValue: DynamicCABundle-client-ca-bundle bucketCounts: - - "1" - - "2" - - "2" - - "0" - - "7" - - "4" - - "0" - - "0" - "0" - "0" - "0" - "0" - count: "16" - explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 - startTimeUnixNano: "1000000" - sum: 39360 - timeUnixNano: "1000000" - - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb - value: - stringValue: POST - bucketCounts: - - "1" - - "54" - - "23" - - "7" - - "19" - "1" - "0" - "0" @@ -1999,182 +2053,92 @@ resourceMetrics: - "0" - "0" - "0" - count: "105" + count: "1" explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 50803 + sum: 2.5328e-05 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: PUT + stringValue: 
DynamicCABundle-csr-controller bucketCounts: - - "0" - - "0" - - "71" - - "10" - - "46" - - "28" - "0" - "0" - "0" - "0" - - "0" - - "0" - count: "155" - explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 - startTimeUnixNano: "1000000" - sum: 350840 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: rest_client_request_size_bytes - - description: '[ALPHA] Number of endpoints added on each Service sync' - histogram: - aggregationTemporality: 2 - dataPoints: - - attributes: - - key: clusterCIDR - value: - stringValue: 10.244.0.0/16 - bucketCounts: - "1" + - "5" - "0" - "0" - "0" - "0" - "0" - count: "1" + count: "6" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 - 1 - - 5 - - 25 - - 125 - - 625 + - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.0008910130000000001 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: node_ipam_controller_cidrset_allocation_tries_per_request - - description: '[ALPHA] Response size in bytes. Broken down by verb and host.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: GET + stringValue: DynamicCABundle-request-header bucketCounts: - - "19" - - "190" - - "280" - - "60" - - "60" - - "9" - - "3" - "0" - - "0" - - "0" - - "0" - - "0" - count: "621" - explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 - startTimeUnixNano: "1000000" - sum: 510764 - timeUnixNano: "1000000" - - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb - value: - stringValue: PATCH - bucketCounts: - "0" - "0" - "0" - "2" - - "14" - - "0" - "0" - "0" - "0" - "0" - "0" - "0" - count: "16" + count: "2" explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 47349 + sum: 3.9634999999999996e-05 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: POST + stringValue: DynamicServingCertificateController bucketCounts: - "0" - - "38" + - "0" + - "0" - "1" - - "24" - - "40" - "2" - "0" - "0" @@ -2182,107 +2146,88 @@ resourceMetrics: - "0" - "0" - "0" - count: "105" + count: "3" explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 113827 + sum: 3.8301e-05 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: PUT + stringValue: 
bootstrap_signer_queue bucketCounts: - "0" - - "4" - - "83" - - "7" - - "37" - - "24" - "0" - "0" - "0" + - "1" + - "0" + - "0" - "0" + - "1" - "0" - "0" - count: "155" + count: "2" explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 290516 + sum: 0.703729387 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: rest_client_response_size_bytes - - description: Distribution individual GC-related stop-the-world pause latencies. - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" + - attributes: + - key: name + value: + stringValue: certificate + bucketCounts: - "0" - "0" - "0" - - "5" - - "20" - - "7" + - "22" + - "4" + - "2" + - "1" + - "1" - "0" - "0" - "0" - count: "32" + count: "30" explicitBounds: - - 9.999999999999999e-10 - - 9.999999999999999e-09 - - 9.999999999999998e-08 - - 1.0239999999999999e-06 - - 1.0239999999999999e-05 - - 0.00010239999999999998 - - 0.0010485759999999998 - - 0.010485759999999998 - - 0.10485759999999998 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 0.00092672 + sum: 0.030803841 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: go_gc_pauses_seconds - - description: '[ALPHA] Duration of syncEndpoints() in seconds' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "11" - - "0" - - "0" - - "0" - - "0" + - attributes: + - key: name + value: + stringValue: claims + bucketCounts: - "0" - "0" - "0" @@ -2294,36 +2239,29 @@ resourceMetrics: - "0" - "0" - "0" - count: "11" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 
+ - 9.999999999999999e-05 - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - - 16.384 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: endpoint_slice_mirroring_controller_endpoints_sync_duration - - description: '[ALPHA] The ratio of chosen deleted pod''s ages to the current youngest pod''s age (at the time). Should be <2.The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate''s effect onthe sorting (and deletion) of pods when a replicaset scales down. This only considers Ready pods when calculating and reporting.' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: + - attributes: + - key: name + value: + stringValue: cronjob + bucketCounts: + - "0" + - "0" + - "0" + - "0" - "0" - "0" - "0" @@ -2332,77 +2270,86 @@ resourceMetrics: - "0" - "0" explicitBounds: - - 0.25 - - 0.5 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 - 1 - - 2 - - 4 - - 8 + - 10 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: replicaset_controller_sorting_deletion_age_ratio - - description: '[ALPHA] Distribution of the remaining lifetime on the certificate used to authenticate a request.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" + - attributes: + - key: name + value: + stringValue: daemonset + bucketCounts: - "0" - "0" - "0" - "0" - "0" + - "9" + - "4" + - "2" + - "2" - "0" - "0" + count: "17" + explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 + startTimeUnixNano: "1000000" + sum: 1.738524786 + timeUnixNano: "1000000" + - attributes: + - key: name + value: + stringValue: deployment + bucketCounts: - "0" - "0" - "0" - "0" - "0" + - "19" + - "15" + - "7" + - "2" - "0" - "0" + count: "43" explicitBounds: - - 0 - - 1800 - - 3600 - - 7200 - - 21600 - - 43200 - - 86400 - - 172800 - - 345600 - - 604800 - - 2.592e+06 - - 7.776e+06 - - 1.5552e+07 - - 3.1104e+07 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 1.6047394809999997 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: apiserver_client_certificate_expiration_seconds - - description: '[ALPHA] Authentication duration in seconds broken out by result.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: result + - key: name value: - stringValue: success + stringValue: disruption bucketCounts: - - "14" - - "0" - - "0" - - "0" - - "0" - "0" - "0" - "0" @@ -2414,41 +2361,25 @@ resourceMetrics: - "0" - "0" - "0" - count: "14" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - - 16.384 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 0.00044309500000000007 + sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: authentication_duration_seconds - - description: '[ALPHA] Request latency in seconds. Broken down by status code.' - histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: code + - key: name value: - stringValue: "201" + stringValue: disruption_recheck bucketCounts: - - "1" - "0" - "0" - "0" @@ -2457,80 +2388,71 @@ resourceMetrics: - "0" - "0" - "0" - count: "1" + - "0" + - "0" + - "0" explicitBounds: - - 0.25 - - 0.5 - - 0.7 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 - 1 - - 1.5 - - 3 - - 5 - 10 startTimeUnixNano: "1000000" - sum: 0.001469642 + sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: apiserver_delegated_authz_request_duration_seconds - - description: '[STABLE] Time between when a cronjob is scheduled to be run, and when the corresponding job is created' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" + - attributes: + - key: name + value: + stringValue: endpoint + bucketCounts: - "0" - "0" - "0" + - "1" + - "2" + - "2" + - "9" - "0" + - "1" - "0" - "0" + count: "15" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 
+ - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 - 1 - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: cronjob_controller_job_creation_skew_duration_seconds - - description: '[ALPHA] How long in seconds processing an item from workqueue takes.' - histogram: - aggregationTemporality: 2 - dataPoints: + - 10 + startTimeUnixNano: "1000000" + sum: 0.6311449450000001 + timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: ClusterRoleAggregator + stringValue: endpoint_slice bucketCounts: - "0" - "0" - "0" - "0" + - "3" + - "7" + - "10" - "0" - - "43" - - "12" - - "5" - - "0" + - "1" - "0" - "0" - count: "60" + count: "21" explicitBounds: - 1e-08 - 1e-07 @@ -2543,25 +2465,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.36295727299999997 + sum: 0.38902778799999993 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: DynamicCABundle-client-ca-bundle + stringValue: endpoint_slice_mirroring bucketCounts: - "0" - "0" - "0" - "0" - - "1" + - "11" - "0" - "0" - "0" - "0" - "0" - "0" - count: "1" + count: "11" explicitBounds: - 1e-08 - 1e-07 @@ -2574,25 +2496,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 2.676e-05 + sum: 0.000194382 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: DynamicCABundle-csr-controller + stringValue: ephemeral_volume bucketCounts: - "0" - "0" - "0" - "0" - "0" - - "4" - "0" - "0" - "0" - "0" - "0" - count: "4" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2605,25 +2526,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000538945 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: DynamicCABundle-request-header + stringValue: garbage_collector_attempt_to_delete bucketCounts: - "0" - "0" - "0" - "0" - "0" - - "1" - "0" - "0" 
- "0" - "0" - "0" - count: "1" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2636,25 +2556,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.0001444 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: DynamicServingCertificateController + stringValue: garbage_collector_attempt_to_orphan bucketCounts: - "0" - "0" - "0" - "0" - - "3" - "0" - "0" - "0" - "0" - "0" - "0" - count: "3" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2667,25 +2586,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 4.7648e-05 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: bootstrap_signer_queue + stringValue: garbage_collector_graph_changes bucketCounts: - "0" - "0" - - "0" - - "0" - - "1" + - "5" + - "586" + - "81" + - "4" - "0" - "0" - "0" - - "1" - "0" - "0" - count: "2" + count: "676" explicitBounds: - 1e-08 - 1e-07 @@ -2698,25 +2617,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.604411193 + sum: 0.005441226 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: certificate + stringValue: horizontalpodautoscaler bucketCounts: - "0" - "0" - "0" - - "21" - - "5" - - "2" - - "1" - - "1" - "0" - "0" - "0" - count: "30" + - "0" + - "0" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2729,12 +2647,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.021059340999999992 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: claims + stringValue: job bucketCounts: - "0" - "0" @@ -2764,7 +2682,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: cronjob + stringValue: job_orphan_pod bucketCounts: - "0" - "0" @@ -2794,20 +2712,19 @@ resourceMetrics: - attributes: - key: name value: - stringValue: daemonset + stringValue: namespace bucketCounts: - "0" - "0" - "0" - "0" - "0" - - "8" - - "3" - - "3" - - "2" - "0" - "0" - count: "16" + - "0" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 
@@ -2820,25 +2737,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.9574482689999999 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: deployment + stringValue: node bucketCounts: - "0" - "0" - "0" - "0" - "0" - - "18" - - "18" - - "6" - - "2" - "0" - "0" - count: "44" + - "0" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2851,24 +2767,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.4226627360000006 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: disruption + stringValue: node_lifecycle_controller bucketCounts: - "0" - "0" - "0" + - "6" + - "1" - "0" + - "1" - "0" - "0" - "0" - "0" - - "0" - - "0" - - "0" + count: "8" explicitBounds: - 1e-08 - 1e-07 @@ -2881,24 +2798,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.006148064 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: disruption_recheck + stringValue: node_lifecycle_controller_pods bucketCounts: - "0" - "0" - "0" - "0" + - "9" - "0" - "0" + - "4" - "0" - "0" - "0" - - "0" - - "0" + count: "13" explicitBounds: - 1e-08 - 1e-07 @@ -2911,25 +2829,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.05713500600000001 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: endpoint + stringValue: noexec_taint_node bucketCounts: - "0" - "0" - "0" - "0" - - "5" + - "2" + - "1" - "0" - - "9" - "0" - - "1" - "0" - "0" - count: "15" + - "0" + count: "3" explicitBounds: - 1e-08 - 1e-07 @@ -2942,22 +2860,22 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.633880887 + sum: 0.000260769 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: endpoint_slice + stringValue: noexec_taint_pod bucketCounts: - "0" - "0" - "0" - - "0" + - "16" + - "5" - "1" - - "11" - - "9" - "0" - - "1" + - "0" + - "0" - "0" - "0" count: "22" @@ -2973,25 +2891,24 @@ resourceMetrics: - 1 - 10 
startTimeUnixNano: "1000000" - sum: 0.537446789 + sum: 0.00032868299999999997 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: endpoint_slice_mirroring + stringValue: orphaned_pods_nodes bucketCounts: - "0" - "0" - "0" - "0" - - "10" - - "1" - "0" - "0" - "0" - "0" - "0" - count: "11" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -3004,12 +2921,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000382724 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: ephemeral_volume + stringValue: pvcprotection bucketCounts: - "0" - "0" @@ -3039,7 +2956,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: garbage_collector_attempt_to_delete + stringValue: pvcs bucketCounts: - "0" - "0" @@ -3047,12 +2964,11 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - - "1" - "0" - "0" - "0" - count: "2" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -3065,12 +2981,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.017845415 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: garbage_collector_attempt_to_orphan + stringValue: pvprotection bucketCounts: - "0" - "0" @@ -3100,20 +3016,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: garbage_collector_graph_changes + stringValue: replicaset bucketCounts: - "0" - "0" - "0" - - "557" - - "90" - - "7" - - "1" - - "0" - "0" + - "23" + - "1" + - "8" + - "7" + - "2" - "0" - "0" - count: "655" + count: "41" explicitBounds: - 1e-08 - 1e-07 @@ -3126,12 +3042,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.008850208999999998 + sum: 0.6981377609999999 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: horizontalpodautoscaler + stringValue: replicationmanager bucketCounts: - "0" - "0" @@ -3161,7 +3077,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: job + stringValue: resource_quota_controller_resource_changes bucketCounts: - "0" 
- "0" @@ -3191,7 +3107,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: job_orphan_pod + stringValue: resourcequota_primary bucketCounts: - "0" - "0" @@ -3221,7 +3137,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: namespace + stringValue: resourcequota_priority bucketCounts: - "0" - "0" @@ -3251,7 +3167,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: node + stringValue: root_ca_cert_publisher bucketCounts: - "0" - "0" @@ -3259,11 +3175,12 @@ resourceMetrics: - "0" - "0" - "0" + - "5" - "0" + - "1" - "0" - "0" - - "0" - - "0" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -3276,25 +3193,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.27884830199999994 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: node_lifecycle_controller + stringValue: service bucketCounts: - "0" - "0" - "0" - - "5" - "0" - "0" - - "1" - "0" - "0" - "0" - "0" - count: "6" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -3307,25 +3223,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.006107007 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: node_lifecycle_controller_pods + stringValue: serviceaccount bucketCounts: - "0" - "0" - "0" - "0" - - "7" - - "2" - - "2" - - "2" - "0" - "0" + - "5" - "0" - count: "13" + - "1" + - "0" + - "0" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -3338,25 +3254,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.039308103999999996 + sum: 0.56229327 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: noexec_taint_node + stringValue: serviceaccount_tokens_secret bucketCounts: - "0" - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - "0" - "0" - "0" - count: "1" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -3369,25 +3284,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.6551e-05 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: 
- stringValue: noexec_taint_pod + stringValue: serviceaccount_tokens_service bucketCounts: - "0" - "0" - "0" - - "17" - - "5" + - "45" + - "1" - "0" - "0" - "0" - "0" - "0" - "0" - count: "22" + count: "46" explicitBounds: - 1e-08 - 1e-07 @@ -3400,12 +3315,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000226613 + sum: 0.00026681999999999997 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: orphaned_pods_nodes + stringValue: stale_pod_disruption bucketCounts: - "0" - "0" @@ -3435,7 +3350,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: pvcprotection + stringValue: statefulset bucketCounts: - "0" - "0" @@ -3465,19 +3380,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: pvcs + stringValue: token_cleaner bucketCounts: - "0" - "0" - "0" - "0" + - "1" - "0" - "0" - "0" - "0" - "0" - "0" - - "0" + count: "1" explicitBounds: - 1e-08 - 1e-07 @@ -3490,12 +3406,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 1.2032e-05 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: pvprotection + stringValue: ttl_jobs_to_delete bucketCounts: - "0" - "0" @@ -3525,20 +3441,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: replicaset + stringValue: ttlcontroller bucketCounts: - "0" - "0" - "0" + - "2" + - "3" + - "1" + - "1" - "0" - - "22" - "1" - - "6" - - "11" - - "2" - "0" - "0" - count: "42" + count: "8" explicitBounds: - 1e-08 - 1e-07 @@ -3551,12 +3467,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.6206368890000001 + sum: 0.7625037620000001 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: replicationmanager + stringValue: volume_expand bucketCounts: - "0" - "0" @@ -3586,7 +3502,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: resource_quota_controller_resource_changes + stringValue: volumes bucketCounts: - "0" - "0" @@ -3611,14 +3527,65 @@ resourceMetrics: - 1 - 10 
startTimeUnixNano: "1000000" - sum: 0 + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: workqueue_work_duration_seconds + - description: Distribution individual GC-related stop-the-world pause latencies. + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "5" + - "11" + - "9" + - "1" + - "0" + - "0" + count: "26" + explicitBounds: + - 9.999999999999999e-10 + - 9.999999999999999e-09 + - 9.999999999999998e-08 + - 1.0239999999999999e-06 + - 1.0239999999999999e-05 + - 0.00010239999999999998 + - 0.0010485759999999998 + - 0.010485759999999998 + - 0.10485759999999998 + startTimeUnixNano: "1000000" + sum: 0.0020879360000000003 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: go_gc_pauses_seconds + - description: '[ALPHA] Client side rate limiter latency in seconds. Broken down by verb, and host.' + histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: name + - key: host value: - stringValue: resourcequota_primary + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: GET bucketCounts: + - "547" - "0" + - "58" + - "6" + - "9" + - "2" - "0" - "0" - "0" @@ -3626,28 +3593,32 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "0" - - "0" + count: "622" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 8.379445553 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: resourcequota_priority + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PATCH bucketCounts: + - "19" - "0" - "0" - "0" @@ -3659,56 +3630,72 @@ resourceMetrics: - "0" - "0" - "0" + - "0" + count: "19" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 
9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 2.3911000000000005e-05 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: root_ca_cert_publisher + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: POST bucketCounts: + - "74" - "0" + - "25" + - "1" + - "8" - "0" - "0" - "0" - "0" - "0" - - "5" - "0" - - "1" - "0" - "0" - count: "6" + count: "108" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0.612684385 + sum: 4.528806070999999 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: service + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PUT bucketCounts: + - "153" + - "0" - "0" - "0" - "0" @@ -3720,59 +3707,81 @@ resourceMetrics: - "0" - "0" - "0" + count: "153" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.0002785 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: rest_client_rate_limiter_duration_seconds + - description: '[ALPHA] Response size in bytes. Broken down by verb and host.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: name + - key: host value: - stringValue: serviceaccount + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: GET bucketCounts: + - "19" + - "190" + - "288" + - "59" + - "55" + - "8" + - "3" - "0" - "0" - "0" - "0" - "0" - - "0" - - "5" - - "0" - - "1" - - "0" - - "0" - count: "6" + count: "622" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 0.31081374599999995 + sum: 490293 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: serviceaccount_tokens_secret + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PATCH bucketCounts: - "0" - "0" - "0" + - "1" + - "18" - "0" - "0" - "0" @@ -3780,87 +3789,109 @@ resourceMetrics: - "0" - "0" - "0" - - "0" + count: "19" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 0 + sum: 60888 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: serviceaccount_tokens_service + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: POST bucketCounts: - "0" - - "0" - - "0" - - "42" - - "4" + - "38" + - "1" + - "27" + - "40" + - "2" - "0" - "0" - "0" - "0" - "0" - "0" - count: "46" + count: "108" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 
startTimeUnixNano: "1000000" - sum: 0.0004029439999999999 + sum: 115762 timeUnixNano: "1000000" - attributes: - - key: name + - key: host + value: + stringValue: 172.18.0.2:6443 + - key: verb value: - stringValue: stale_pod_disruption + stringValue: PUT bucketCounts: - "0" + - "4" + - "83" + - "7" + - "35" + - "24" - "0" - "0" - "0" - "0" - "0" - "0" + count: "153" + explicitBounds: + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 + startTimeUnixNano: "1000000" + sum: 284944 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: rest_client_response_size_bytes + - description: '[ALPHA] Number of endpoints removed on each Service sync' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "20" - "0" - "0" - "0" - "0" - explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: statefulset - bucketCounts: - "0" - "0" - "0" @@ -3872,56 +3903,41 @@ resourceMetrics: - "0" - "0" - "0" + count: "20" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 + - 32768 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: token_cleaner - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "1" - - "0" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: endpoint_slice_controller_endpoints_removed_per_sync + - description: '[ALPHA] A metric measuring the latency for nodesync which updates loadbalancer hosts on cluster node updates.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: - "0" - "0" - "0" - "0" - "0" - count: "1" - explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 1.2334e-05 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: ttl_jobs_to_delete - bucketCounts: - "0" - "0" - "0" @@ -3934,58 +3950,85 @@ resourceMetrics: - "0" - "0" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - 1 - - 10 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: service_controller_nodesync_latency_seconds + - description: '[ALPHA] Request latency in seconds. Broken down by verb, and host.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: name + - key: host value: - stringValue: ttlcontroller + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: GET bucketCounts: + - "328" + - "215" + - "58" + - "9" + - "9" + - "2" - "0" - - "0" - - "0" - - "4" - "1" - "0" - "0" - "0" - - "1" - "0" - "0" - count: "6" + count: "622" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0.455270024 + sum: 15.359288399000006 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: volume_expand + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PATCH bucketCounts: + - "4" + - "11" - "0" - "0" - "0" + - "4" - "0" - "0" - "0" @@ -3993,26 +4036,37 @@ resourceMetrics: - "0" - "0" - "0" - - "0" + count: "19" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 2.823510979 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: volumes + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: POST bucketCounts: + - "46" + - "18" + - "25" + - "1" + - "12" + - "6" - "0" - "0" - "0" @@ -4020,66 +4074,88 @@ resourceMetrics: - "0" - "0" - "0" + count: "108" + explicitBounds: + - 0.005 + - 0.025 + - 0.1 + - 0.25 + - 0.5 + - 1 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 + startTimeUnixNano: "1000000" + sum: 10.172680351999995 + timeUnixNano: "1000000" + - attributes: + - key: host + value: + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PUT + bucketCounts: + - "88" + - "61" + - "3" - "0" - "0" + - "1" - "0" - "0" - explicitBounds: - - 1e-08 - - 
1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - "0" + - "0" + - "0" + - "0" + - "0" + count: "153" + explicitBounds: + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 1.696062369 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: workqueue_work_duration_seconds - - description: '[ALPHA] A metric measuring the latency for updating each load balancer hosts.' + name: rest_client_request_duration_seconds + - description: '[ALPHA] Number of endpoints added on each Service sync' histogram: aggregationTemporality: 2 dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" + - attributes: + - key: clusterCIDR + value: + stringValue: 10.244.0.0/16 + bucketCounts: + - "1" - "0" - "0" - "0" - "0" - "0" + count: "1" explicitBounds: - 1 - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 + - 5 + - 25 + - 125 + - 625 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -4087,13 +4163,12 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: service_controller_update_loadbalancer_host_latency_seconds - - description: '[ALPHA] Number of endpoints removed on each Service sync' + name: node_ipam_controller_cidrset_allocation_tries_per_request + - description: '[ALPHA] Latencies in seconds of data encryption key(DEK) generation operations.' 
histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - - "21" - "0" - "0" - "0" @@ -4109,23 +4184,21 @@ resourceMetrics: - "0" - "0" - "0" - count: "21" explicitBounds: - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 - - 32768 + - 5e-06 + - 1e-05 + - 2e-05 + - 4e-05 + - 8e-05 + - 0.00016 + - 0.00032 + - 0.00064 + - 0.00128 + - 0.00256 + - 0.00512 + - 0.01024 + - 0.02048 + - 0.04096 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -4133,7 +4206,7 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: endpoint_slice_controller_endpoints_removed_per_sync + name: apiserver_storage_data_key_generation_duration_seconds - description: '[ALPHA] Number of EndpointSlices changed on each Service sync' histogram: aggregationTemporality: 2 @@ -4143,7 +4216,7 @@ resourceMetrics: value: stringValue: Disabled bucketCounts: - - "11" + - "10" - "0" - "0" - "0" @@ -4155,7 +4228,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "21" + count: "20" explicitBounds: - 0.005 - 0.01 @@ -4176,69 +4249,24 @@ resourceMetrics: value: stringValue: histogram name: endpoint_slice_controller_endpointslices_changed_per_sync - - description: '[ALPHA] A metric measuring the latency for nodesync which updates loadbalancer hosts on cluster node updates.' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - explicitBounds: - - 1 - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: service_controller_nodesync_latency_seconds - - description: Distribution of heap allocations by approximate size. 
Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. + - description: Distribution of freed heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - - "11259" - - "280582" - - "162977" - - "109770" - - "93334" - - "17440" - - "5907" - - "2005" - - "3132" - - "400" - - "248" - - "188" - count: "687242" + - "7043" + - "213990" + - "102799" + - "79580" + - "40215" + - "10751" + - "4301" + - "1479" + - "1544" + - "221" + - "48" + - "78" + count: "462049" explicitBounds: - 8.999999999999998 - 24.999999999999996 @@ -4252,50 +4280,14 @@ resourceMetrics: - 13568.999999999998 - 27264.999999999996 startTimeUnixNano: "1000000" - sum: 1.01464848e+08 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: go_gc_heap_allocs_by_size_bytes - - description: '[ALPHA] Request latency in seconds. Broken down by status code.' - histogram: - aggregationTemporality: 2 - dataPoints: - - attributes: - - key: code - value: - stringValue: "201" - bucketCounts: - - "1" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "1" - explicitBounds: - - 0.25 - - 0.5 - - 0.7 - - 1 - - 1.5 - - 3 - - 5 - - 10 - startTimeUnixNano: "1000000" - sum: 0.005527615 + sum: 5.5756328e+07 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: apiserver_delegated_authn_request_duration_seconds - - description: '[ALPHA] Latencies in seconds of data encryption key(DEK) generation operations.' 
+ name: go_gc_heap_frees_by_size_bytes + - description: '[ALPHA] The time it took to delete the job since it became eligible for deletion' histogram: aggregationTemporality: 2 dataPoints: @@ -4316,20 +4308,20 @@ resourceMetrics: - "0" - "0" explicitBounds: - - 5e-06 - - 1e-05 - - 2e-05 - - 4e-05 - - 8e-05 - - 0.00016 - - 0.00032 - - 0.00064 - - 0.00128 - - 0.00256 - - 0.00512 - - 0.01024 - - 0.02048 - - 0.04096 + - 0.1 + - 0.2 + - 0.4 + - 0.8 + - 1.6 + - 3.2 + - 6.4 + - 12.8 + - 25.6 + - 51.2 + - 102.4 + - 204.8 + - 409.6 + - 819.2 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -4337,58 +4329,35 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: apiserver_storage_data_key_generation_duration_seconds - - description: '[ALPHA] Number of namespace syncs happened in root ca cert publisher.' + name: ttl_after_finished_controller_job_deletion_duration_seconds + - description: '[ALPHA] The ratio of chosen deleted pod''s ages to the current youngest pod''s age (at the time). Should be <2.The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate''s effect onthe sorting (and deletion) of pods when a replicaset scales down. This only considers Ready pods when calculating and reporting.' 
histogram: aggregationTemporality: 2 - dataPoints: - - attributes: - - key: code - value: - stringValue: "200" - bucketCounts: - - "0" - - "2" - - "3" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "1" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "6" - explicitBounds: - - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - - 16.384 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: + - 0.25 + - 0.5 + - 1 + - 2 + - 4 + - 8 startTimeUnixNano: "1000000" - sum: 0.612652346 + sum: 0 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: root_ca_cert_publisher_sync_duration_seconds - - description: '[ALPHA] Client side rate limiter latency in seconds. Broken down by verb, and host.' + name: replicaset_controller_sorting_deletion_age_ratio + - description: '[ALPHA] Request size in bytes. Broken down by verb and host.' 
histogram: aggregationTemporality: 2 dataPoints: @@ -4400,12 +4369,11 @@ resourceMetrics: value: stringValue: GET bucketCounts: - - "548" + - "622" + - "0" + - "0" + - "0" - "0" - - "55" - - "9" - - "7" - - "2" - "0" - "0" - "0" @@ -4413,22 +4381,21 @@ resourceMetrics: - "0" - "0" - "0" - count: "621" + count: "622" explicitBounds: - - 0.005 - - 0.025 - - 0.1 - - 0.25 - - 0.5 - - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 8.015528284000005 + sum: 0 timeUnixNano: "1000000" - attributes: - key: host @@ -4438,35 +4405,33 @@ resourceMetrics: value: stringValue: PATCH bucketCounts: - - "16" - - "0" - - "0" - - "0" - - "0" - - "0" + - "1" + - "5" + - "1" - "0" + - "8" + - "4" - "0" - "0" - "0" - "0" - "0" - "0" - count: "16" + count: "19" explicitBounds: - - 0.005 - - 0.025 - - 0.1 - - 0.25 - - 0.5 - - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 2.3595e-05 + sum: 42850 timeUnixNano: "1000000" - attributes: - key: host @@ -4476,35 +4441,33 @@ resourceMetrics: value: stringValue: POST bucketCounts: - - "71" - - "0" + - "1" + - "54" - "26" - - "3" - - "5" - - "0" - - "0" + - "7" + - "19" + - "1" - "0" - "0" - "0" - "0" - "0" - "0" - count: "105" + count: "108" explicitBounds: - - 0.005 - - 0.025 - - 0.1 - - 0.25 - - 0.5 - - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 3.526327406 + sum: 51864 timeUnixNano: "1000000" - attributes: - key: host @@ -4514,51 +4477,79 @@ resourceMetrics: value: stringValue: PUT bucketCounts: - - "155" - - "0" - "0" - "0" + - "79" + - "10" + - "38" + - "26" - "0" - "0" - "0" - "0" - 
"0" - "0" + count: "153" + explicitBounds: + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 + startTimeUnixNano: "1000000" + sum: 314490 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: rest_client_request_size_bytes + - description: Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "1190" - "0" + - "306" + - "2187" + - "2614" + - "1116" + - "257" + - "50" - "0" - "0" - count: "155" + count: "7720" explicitBounds: - - 0.005 - - 0.025 - - 0.1 - - 0.25 - - 0.5 - - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 9.999999999999999e-10 + - 9.999999999999999e-09 + - 9.999999999999998e-08 + - 1.0239999999999999e-06 + - 1.0239999999999999e-05 + - 0.00010239999999999998 + - 0.0010485759999999998 + - 0.010485759999999998 + - 0.10485759999999998 startTimeUnixNano: "1000000" - sum: 0.00028543299999999987 + sum: 0.093071936 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: rest_client_rate_limiter_duration_seconds - - description: '[ALPHA] ' + name: go_sched_latencies_seconds + - description: '[ALPHA] Histogram of the number of seconds the last auth exec plugin client certificate lived before being rotated. If auth exec plugin client certificates are unused, histogram will contain no data.' 
histogram: aggregationTemporality: 2 dataPoints: - - attributes: - - key: status - value: - stringValue: miss - bucketCounts: - - "1" + - bucketCounts: + - "0" - "0" - "0" - "0" @@ -4570,65 +4561,72 @@ resourceMetrics: - "0" - "0" - "0" - count: "1" explicitBounds: - - 0.005 - - 0.01 - - 0.025 - - 0.05 - - 0.1 - - 0.25 - - 0.5 - - 1 - - 2.5 - - 5 - - 10 + - 600 + - 1800 + - 3600 + - 14400 + - 86400 + - 604800 + - 2.592e+06 + - 7.776e+06 + - 1.5552e+07 + - 3.1104e+07 + - 1.24416e+08 startTimeUnixNano: "1000000" - sum: 0.005 + sum: 0 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: authentication_token_cache_request_duration_seconds - - description: Distribution of freed heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. + name: rest_client_exec_plugin_certificate_rotation_age + - description: '[ALPHA] Duration of syncEndpoints() in seconds' histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - - "7415" - - "219696" - - "115065" - - "85941" - - "82939" - - "12318" - - "4816" - - "1609" - - "2940" - - "229" - - "53" - - "80" - count: "533101" + - "11" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "11" explicitBounds: - - 8.999999999999998 - - 24.999999999999996 - - 64.99999999999999 - - 144.99999999999997 - - 320.99999999999994 - - 704.9999999999999 - - 1536.9999999999998 - - 3200.9999999999995 - - 6528.999999999999 - - 13568.999999999998 - - 27264.999999999996 + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + - 16.384 startTimeUnixNano: "1000000" - sum: 7.6963128e+07 + sum: 0 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: go_gc_heap_frees_by_size_bytes + name: 
endpoint_slice_mirroring_controller_endpoints_sync_duration scope: - name: otelcol/prometheusreceiver - version: v0.105.0 + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver + version: v0.110.0 diff --git a/functional_tests/testdata_histogram/expected/v1.26/coredns_metrics.yaml b/functional_tests/testdata_histogram/expected/v1.26/coredns_metrics.yaml index b584a6281..8a282ad22 100644 --- a/functional_tests/testdata_histogram/expected/v1.26/coredns_metrics.yaml +++ b/functional_tests/testdata_histogram/expected/v1.26/coredns_metrics.yaml @@ -18,13 +18,13 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-787d4945fb-jfz8p + stringValue: coredns-787d4945fb-wll9w - key: k8s.pod.uid value: - stringValue: 485281d9-4cec-4fa4-9047-3746fa550353 + stringValue: d9c37f34-e239-485d-8632-1a2bc1c9f239 - key: net.host.name value: - stringValue: 10.244.0.4 + stringValue: 10.244.0.2 - key: net.host.port value: stringValue: "9153" @@ -33,13 +33,13 @@ resourceMetrics: stringValue: linux - key: server.address value: - stringValue: 10.244.0.4 + stringValue: 10.244.0.2 - key: server.port value: stringValue: "9153" - key: service.instance.id value: - stringValue: 10.244.0.4:9153 + stringValue: 10.244.0.2:9153 - key: service.name value: stringValue: coredns @@ -49,14 +49,11 @@ resourceMetrics: schemaUrl: https://opentelemetry.io/schemas/1.6.1 scopeMetrics: - metrics: - - description: Size of the EDNS0 UDP buffer in bytes (64K for TCP) per zone and protocol. + - description: Histogram of the time (in seconds) each request took per zone. histogram: aggregationTemporality: 2 dataPoints: - attributes: - - key: proto - value: - stringValue: udp - key: server value: stringValue: dns://:53 @@ -64,50 +61,57 @@ resourceMetrics: value: stringValue: . 
bucketCounts: + - "2" - "0" - - "8" - - "1" - "0" - "0" - "0" + - "3" - "0" - "0" - "0" + - "1" + - "0" - "0" - "0" - "0" - "0" - "0" - "0" - count: "9" + count: "6" explicitBounds: - - 0 - - 100 - - 200 - - 300 - - 400 - - 511 - - 1023 - - 2047 - - 4095 - - 8291 - - 16000 - - 32000 - - 48000 - - 64000 + - 0.00025 + - 0.0005 + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 startTimeUnixNano: "1000000" - sum: 653 + sum: 0.096871069 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: coredns_dns_request_size_bytes - - description: Histogram of the time (in seconds) each request took per zone. + name: coredns_dns_request_duration_seconds + - description: Size of the returned response in bytes. histogram: aggregationTemporality: 2 dataPoints: - attributes: + - key: proto + value: + stringValue: udp - key: server value: stringValue: dns://:53 @@ -115,15 +119,12 @@ resourceMetrics: value: stringValue: . bucketCounts: - - "5" - "0" - "0" - - "1" + - "4" + - "2" - "0" - - "1" - - "1" - "0" - - "1" - "0" - "0" - "0" @@ -132,44 +133,43 @@ resourceMetrics: - "0" - "0" - "0" - count: "9" + - "0" + count: "6" explicitBounds: - - 0.00025 - - 0.0005 - - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 + - 0 + - 100 + - 200 + - 300 + - 400 + - 511 + - 1023 + - 2047 + - 4095 + - 8291 + - 16000 + - 32000 + - 48000 + - 64000 startTimeUnixNano: "1000000" - sum: 0.080724527 + sum: 997 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: coredns_dns_request_duration_seconds + name: coredns_dns_response_size_bytes - description: Histogram of the time (in seconds) each request took. 
histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - "0" - - "125" + - "112" - "0" - "0" - "0" - "0" - count: "125" + count: "112" explicitBounds: - 0.00025 - 0.0025 @@ -177,14 +177,14 @@ resourceMetrics: - 0.25 - 2.5 startTimeUnixNano: "1000000" - sum: 0.06260746900000001 + sum: 0.063655658 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram name: coredns_health_request_duration_seconds - - description: Size of the returned response in bytes. + - description: Size of the EDNS0 UDP buffer in bytes (64K for TCP) per zone and protocol. histogram: aggregationTemporality: 2 dataPoints: @@ -200,9 +200,9 @@ resourceMetrics: stringValue: . bucketCounts: - "0" + - "4" + - "2" - "0" - - "8" - - "1" - "0" - "0" - "0" @@ -214,7 +214,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "9" + count: "6" explicitBounds: - 0 - 100 @@ -231,13 +231,13 @@ resourceMetrics: - 48000 - 64000 startTimeUnixNano: "1000000" - sum: 1407 + sum: 475 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: coredns_dns_response_size_bytes + name: coredns_dns_request_size_bytes - description: Histogram of the time each request took. 
histogram: aggregationTemporality: 2 @@ -253,7 +253,6 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" - "0" - "1" @@ -267,7 +266,8 @@ resourceMetrics: - "0" - "0" - "0" - count: "2" + - "0" + count: "1" explicitBounds: - 0.00025 - 0.0005 @@ -286,7 +286,7 @@ resourceMetrics: - 4.096 - 8.192 startTimeUnixNano: "1000000" - sum: 0.011487233 + sum: 0.004573306 timeUnixNano: "1000000" - attributes: - key: rcode @@ -301,11 +301,11 @@ resourceMetrics: - "0" - "0" - "0" - - "1" + - "2" - "0" - "0" - - "1" - "0" + - "1" - "0" - "0" - "0" @@ -313,7 +313,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "2" + count: "3" explicitBounds: - 0.00025 - 0.0005 @@ -332,7 +332,7 @@ resourceMetrics: - 4.096 - 8.192 startTimeUnixNano: "1000000" - sum: 0.06821759699999999 + sum: 0.09162727899999999 timeUnixNano: "1000000" metadata: - key: prometheus.type @@ -340,5 +340,5 @@ resourceMetrics: stringValue: histogram name: coredns_forward_request_duration_seconds scope: - name: otelcol/prometheusreceiver - version: v0.105.0 + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver + version: v0.110.0 diff --git a/functional_tests/testdata_histogram/expected/v1.27/controller_manager_metrics.yaml b/functional_tests/testdata_histogram/expected/v1.27/controller_manager_metrics.yaml index e275b74e2..0e8895cfe 100644 --- a/functional_tests/testdata_histogram/expected/v1.27/controller_manager_metrics.yaml +++ b/functional_tests/testdata_histogram/expected/v1.27/controller_manager_metrics.yaml @@ -21,7 +21,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 4551e654-a1fd-41f6-90ff-e5abd2c7a519 + stringValue: 98666744-e2df-46bb-9b96-e180263bd67c - key: net.host.name value: stringValue: 172.18.0.2 @@ -49,53 +49,7 @@ resourceMetrics: schemaUrl: https://opentelemetry.io/schemas/1.6.1 scopeMetrics: - metrics: - - description: '[ALPHA] Number of endpoints added on each Service sync' - histogram: - 
aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "17" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "17" - explicitBounds: - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 - - 32768 - startTimeUnixNano: "1000000" - sum: 4 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: endpoint_slice_controller_endpoints_added_per_sync - - description: '[ALPHA] How long in seconds processing an item from workqueue takes.' + - description: '[ALPHA] How long in seconds an item stays in workqueue before being requested.' histogram: aggregationTemporality: 2 dataPoints: @@ -107,15 +61,15 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "0" - - "42" + - "21" + - "16" + - "17" - "7" - - "6" - "3" - "0" - "0" - count: "58" + - "0" + count: "64" explicitBounds: - 1e-08 - 1e-07 @@ -128,7 +82,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 2.2749872509999998 + sum: 0.295257275 timeUnixNano: "1000000" - attributes: - key: name @@ -139,14 +93,14 @@ resourceMetrics: - "0" - "0" - "0" - - "1" + - "2" - "0" - "0" - "0" - "0" - "0" - "0" - count: "1" + count: "2" explicitBounds: - 1e-08 - 1e-07 @@ -159,7 +113,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 3.1569e-05 + sum: 0.00010025799999999999 timeUnixNano: "1000000" - attributes: - key: name @@ -170,14 +124,14 @@ resourceMetrics: - "0" - "0" - "0" + - "4" - "0" - - "5" - "0" - "0" - "0" - "0" - "0" - count: "5" + count: "4" explicitBounds: - 1e-08 - 1e-07 @@ -190,7 +144,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.0027392659999999997 + sum: 0.000211477 timeUnixNano: "1000000" - attributes: - key: name @@ -201,8 +155,8 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" + - "1" - "0" - "0" - "0" @@ -221,7 +175,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - 
sum: 2.6089e-05 + sum: 0.000152577 timeUnixNano: "1000000" - attributes: - key: name @@ -231,8 +185,8 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "3" + - "1" + - "2" - "0" - "0" - "0" @@ -252,7 +206,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 5.286e-05 + sum: 3.6178e-05 timeUnixNano: "1000000" - attributes: - key: name @@ -262,14 +216,14 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - "1" - "0" - "0" - "0" - - "1" - "0" - "0" + - "1" + - "0" count: "2" explicitBounds: - 1e-08 @@ -283,7 +237,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.253246015 + sum: 4.000332361 timeUnixNano: "1000000" - attributes: - key: name @@ -293,12 +247,12 @@ resourceMetrics: - "0" - "0" - "0" - - "17" - - "8" - - "3" + - "18" - "0" - "2" - "0" + - "10" + - "0" - "0" - "0" count: "30" @@ -314,7 +268,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.039911327 + sum: 0.6513473890000003 timeUnixNano: "1000000" - attributes: - key: name @@ -384,11 +338,11 @@ resourceMetrics: - "0" - "0" - "0" + - "7" - "0" - - "0" - - "10" - "3" - "2" + - "3" - "2" - "0" - "0" @@ -405,7 +359,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.6081393639999995 + sum: 0.627224736 timeUnixNano: "1000000" - attributes: - key: name @@ -415,15 +369,15 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "0" - - "18" - - "13" - - "9" + - "20" + - "2" + - "7" + - "5" + - "5" - "2" - "0" - "0" - count: "42" + count: "41" explicitBounds: - 1e-08 - 1e-07 @@ -436,7 +390,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.353375918 + sum: 0.610048157 timeUnixNano: "1000000" - attributes: - key: name @@ -506,12 +460,12 @@ resourceMetrics: - "0" - "0" - "0" + - "8" + - "4" - "0" - - "5" - "0" - - "8" - "0" - - "1" + - "2" - "0" - "0" count: "14" @@ -527,7 +481,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.5380258259999999 + sum: 0.4863371810000001 timeUnixNano: "1000000" - attributes: - key: name @@ 
-537,15 +491,15 @@ resourceMetrics: - "0" - "0" - "0" + - "16" + - "2" + - "0" - "0" - - "5" - - "4" - - "8" - "0" - - "1" + - "2" - "0" - "0" - count: "18" + count: "20" explicitBounds: - 1e-08 - 1e-07 @@ -558,7 +512,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.696240538 + sum: 0.48655040499999996 timeUnixNano: "1000000" - attributes: - key: name @@ -568,11 +522,11 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "10" + - "9" - "0" - "0" - "0" + - "1" - "0" - "0" - "0" @@ -589,7 +543,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000265928 + sum: 0.042640992 timeUnixNano: "1000000" - attributes: - key: name @@ -632,12 +586,12 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" - "0" + - "2" - "0" - "0" - count: "1" + count: "2" explicitBounds: - 1e-08 - 1e-07 @@ -650,7 +604,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.001641362 + sum: 1.246962405 timeUnixNano: "1000000" - attributes: - key: name @@ -690,15 +644,15 @@ resourceMetrics: - "0" - "0" - "3" - - "532" - - "118" - - "1" + - "406" + - "107" + - "143" - "0" - "0" - "0" - "0" - "0" - count: "654" + count: "659" explicitBounds: - 1e-08 - 1e-07 @@ -711,7 +665,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.004664540999999998 + sum: 0.05611566299999995 timeUnixNano: "1000000" - attributes: - key: name @@ -871,15 +825,15 @@ resourceMetrics: - "0" - "0" - "0" - - "5" + - "4" - "1" - "0" - - "1" - "0" - "0" + - "1" - "0" - "0" - count: "7" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -892,7 +846,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.005471408 + sum: 0.24444672199999998 timeUnixNano: "1000000" - attributes: - key: name @@ -903,10 +857,10 @@ resourceMetrics: - "0" - "0" - "0" - - "8" - - "1" - "3" - - "1" + - "6" + - "0" + - "4" - "0" - "0" - "0" @@ -923,7 +877,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.035108433000000015 + sum: 0.24814251900000006 timeUnixNano: 
"1000000" - attributes: - key: name @@ -934,11 +888,11 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - "0" + - "1" - "0" - "0" count: "1" @@ -954,7 +908,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 2.5497e-05 + sum: 0.244618094 timeUnixNano: "1000000" - attributes: - key: name @@ -964,11 +918,11 @@ resourceMetrics: - "0" - "0" - "0" - - "14" - - "8" + - "18" - "0" - "0" - "0" + - "4" - "0" - "0" - "0" @@ -985,7 +939,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.00029099299999999997 + sum: 0.24669473700000005 timeUnixNano: "1000000" - attributes: - key: name @@ -1115,15 +1069,15 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "18" - - "4" - - "6" - - "8" + - "20" + - "5" - "2" + - "6" + - "6" - "0" - "0" - count: "38" + - "0" + count: "39" explicitBounds: - 1e-08 - 1e-07 @@ -1136,7 +1090,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.0292141900000003 + sum: 0.165675845 timeUnixNano: "1000000" - attributes: - key: name @@ -1266,12 +1220,12 @@ resourceMetrics: - "0" - "0" - "0" + - "1" - "0" - "0" - "0" - - "5" - - "0" - "1" + - "4" - "0" - "0" count: "6" @@ -1287,7 +1241,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.617389953 + sum: 1.897431202 timeUnixNano: "1000000" - attributes: - key: name @@ -1327,12 +1281,12 @@ resourceMetrics: - "0" - "0" - "0" + - "1" - "0" - "0" - "0" - - "5" - - "0" - "1" + - "4" - "0" - "0" count: "6" @@ -1348,7 +1302,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.165999207 + sum: 1.70029148 timeUnixNano: "1000000" - attributes: - key: name @@ -1388,11 +1342,11 @@ resourceMetrics: - "0" - "0" - "0" - - "42" - - "4" - - "0" + - "43" + - "1" - "0" - "0" + - "2" - "0" - "0" - "0" @@ -1409,7 +1363,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000368605 + sum: 0.18726025999999993 timeUnixNano: "1000000" - attributes: - key: name @@ -1480,8 +1434,8 @@ resourceMetrics: - "0" - "0" - "0" - - "1" 
- "0" + - "1" - "0" - "0" - "0" @@ -1500,7 +1454,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 2.2532e-05 + sum: 0.000262503 timeUnixNano: "1000000" - attributes: - key: name @@ -1540,14 +1494,14 @@ resourceMetrics: - "0" - "0" - "0" - - "5" - - "0" - - "0" + - "4" + - "1" - "0" - "0" - "1" - "0" - "0" + - "0" count: "6" explicitBounds: - 1e-08 @@ -1561,7 +1515,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.7069737770000001 + sum: 0.07821251100000001 timeUnixNano: "1000000" - attributes: - key: name @@ -1627,113 +1581,107 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: workqueue_work_duration_seconds - - description: '[ALPHA] How long in seconds an item stays in workqueue before being requested.' + name: workqueue_queue_duration_seconds + - description: '[ALPHA] Number of endpoints added on each Service sync' histogram: aggregationTemporality: 2 dataPoints: - - attributes: - - key: name - value: - stringValue: ClusterRoleAggregator - bucketCounts: - - "0" + - bucketCounts: + - "19" - "0" - "0" - - "13" - - "10" - - "24" - - "8" - - "3" - "0" - "0" - "0" - count: "58" - explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 0.183020271 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: DynamicCABundle-client-ca-bundle - bucketCounts: - "0" - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - "0" - "0" - "0" - count: "1" + count: "19" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 + - 32768 startTimeUnixNano: "1000000" - sum: 6.5332e-05 + sum: 4 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: 
endpoint_slice_controller_endpoints_added_per_sync + - description: '[ALPHA] Request latency in seconds. Broken down by status code.' + histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: name + - key: code value: - stringValue: DynamicCABundle-csr-controller + stringValue: "201" bucketCounts: + - "1" - "0" - "0" - "0" - "0" - - "4" - - "1" - - "0" - "0" - "0" - "0" - "0" - count: "5" + count: "1" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 + - 0.25 + - 0.5 + - 0.7 - 1 + - 1.5 + - 3 + - 5 - 10 startTimeUnixNano: "1000000" - sum: 0.0007490169999999999 + sum: 0.001418113 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: DynamicCABundle-request-header + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: apiserver_delegated_authz_request_duration_seconds + - description: '[ALPHA] Request latency in seconds. Broken down by verb, and host.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - attributes: + - key: host + value: + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: GET bucketCounts: - - "0" - - "0" - - "0" - - "0" + - "145" + - "38" + - "55" + - "12" + - "9" + - "2" - "0" - "1" - "0" @@ -1741,119 +1689,228 @@ resourceMetrics: - "0" - "0" - "0" - count: "1" + count: "262" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0.000296076 + sum: 12.900358273000002 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: DynamicServingCertificateController + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PATCH bucketCounts: - - "0" + - "3" + - "7" - "0" - "0" - "1" - - "2" + - "3" - "0" - "0" - "0" - "0" - "0" - "0" - count: "3" + - "0" + count: "14" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 5.0886e-05 + sum: 1.9633367179999999 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: bootstrap_signer_queue + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: POST bucketCounts: + - "40" + - "17" + - "25" + - "7" + - "6" + - "6" - "0" - "0" - "0" - - "1" - - "0" - - "0" - "0" - "0" - "0" - - "1" - "0" - count: "2" + count: "101" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 3.000088145 + sum: 8.024932455000002 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - 
stringValue: certificate + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PUT bucketCounts: + - "85" + - "65" + - "1" - "0" - "0" - "0" - - "17" - - "1" - - "2" - "0" - - "10" - "0" - "0" - "0" - count: "30" + - "0" + - "0" + - "0" + count: "151" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 + startTimeUnixNano: "1000000" + sum: 0.9361699639999996 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: rest_client_request_duration_seconds + - description: Distribution of heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "11382" + - "269449" + - "149489" + - "107049" + - "64414" + - "15193" + - "5002" + - "2031" + - "2233" + - "457" + - "277" + - "189" + count: "627165" + explicitBounds: + - 8.999999999999998 + - 24.999999999999996 + - 64.99999999999999 + - 144.99999999999997 + - 320.99999999999994 + - 704.9999999999999 + - 1536.9999999999998 + - 3200.9999999999995 + - 6528.999999999999 + - 13568.999999999998 + - 27264.999999999996 startTimeUnixNano: "1000000" - sum: 0.4701162300000001 + sum: 8.7816656e+07 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: go_gc_heap_allocs_by_size_bytes + - description: Distribution of freed heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "7459" + - "198567" + - "94569" + - "78868" + - "53185" + - "9613" + - "3970" + - "1526" + - "2016" + - "269" + - "70" + - "79" + count: "450191" + explicitBounds: + - 8.999999999999998 + - 24.999999999999996 + - 64.99999999999999 + - 144.99999999999997 + - 320.99999999999994 + - 704.9999999999999 + - 1536.9999999999998 + - 3200.9999999999995 + - 6528.999999999999 + - 13568.999999999998 + - 27264.999999999996 + startTimeUnixNano: "1000000" + sum: 6.1027424e+07 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: go_gc_heap_frees_by_size_bytes + - description: '[ALPHA] Authentication duration in seconds broken out by result.' + histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: name + - key: result value: - stringValue: claims + stringValue: success bucketCounts: + - "15" - "0" - "0" - "0" @@ -1865,25 +1922,149 @@ resourceMetrics: - "0" - "0" - "0" + - "0" + - "0" + - "0" + - "0" + count: "15" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - 0.001 - - 0.01 - - 0.1 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + - 16.384 + startTimeUnixNano: "1000000" + sum: 0.0005649819999999999 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: authentication_duration_seconds + - description: '[ALPHA] Histogram of the number of seconds the last auth exec plugin client certificate lived before being rotated. If auth exec plugin client certificates are unused, histogram will contain no data.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: + - 600 + - 1800 + - 3600 + - 14400 + - 86400 + - 604800 + - 2.592e+06 + - 7.776e+06 + - 1.5552e+07 + - 3.1104e+07 + - 1.24416e+08 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: rest_client_exec_plugin_certificate_rotation_age + - description: '[ALPHA] A metric measuring the latency for nodesync which updates loadbalancer hosts on cluster node updates.' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: - 1 - - 10 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: cronjob - bucketCounts: + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: service_controller_nodesync_latency_seconds + - description: '[ALPHA] The ratio of chosen deleted pod''s ages to the current youngest pod''s age (at the time). Should be <2.The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate''s effect onthe sorting (and deletion) of pods when a replicaset scales down. This only considers Ready pods when calculating and reporting.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: + - 0.25 + - 0.5 + - 1 + - 2 + - 4 + - 8 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: replicaset_controller_sorting_deletion_age_ratio + - description: '[STABLE] Time between when a cronjob is scheduled to be run, and when the corresponding job is created' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: - "0" - "0" - "0" @@ -1896,89 +2077,155 @@ resourceMetrics: - "0" - "0" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - 1 - - 10 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: daemonset - bucketCounts: + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: cronjob_controller_job_creation_skew_duration_seconds + - description: '[ALPHA] The time it took to delete the job since it became eligible for deletion' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" - "0" - "0" - "0" - - "9" - - "1" - - "1" - - "2" - - "4" - "0" - "0" - "0" - count: "17" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - 0.1 - - 1 - - 10 + - 0.2 + - 0.4 + - 0.8 + - 1.6 + - 3.2 + - 6.4 + - 12.8 + - 25.6 + - 51.2 + - 102.4 + - 204.8 + - 409.6 + - 819.2 startTimeUnixNano: "1000000" - sum: 0.197769057 + sum: 0 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: ttl_after_finished_controller_job_deletion_duration_seconds + - description: '[ALPHA] Duration of syncEndpoints() in seconds' + 
histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "10" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "10" + explicitBounds: + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + - 16.384 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: endpoint_slice_mirroring_controller_endpoints_sync_duration + - description: '[ALPHA] Request latency in seconds. Broken down by status code.' + histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: name + - key: code value: - stringValue: deployment + stringValue: "201" bucketCounts: + - "1" + - "0" - "0" - "0" - "0" - - "17" - - "3" - - "8" - - "8" - - "6" - "0" - "0" - "0" - count: "42" + - "0" + count: "1" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 + - 0.25 + - 0.5 + - 0.7 - 1 + - 1.5 + - 3 + - 5 - 10 startTimeUnixNano: "1000000" - sum: 0.18511068899999997 + sum: 0.008222015 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: disruption - bucketCounts: - - "0" - - "0" - - "0" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: apiserver_delegated_authn_request_duration_seconds + - description: '[ALPHA] Duration in seconds for NodeController to update the health of a single node.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "28" - "0" - "0" - "0" @@ -1987,28 +2234,30 @@ resourceMetrics: - "0" - "0" - "0" + count: "28" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 0.004 + - 0.016 + - 0.064 + - 0.256 + - 1.024 + - 4.096 + - 16.384 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.002360085 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: disruption_recheck - bucketCounts: - - "0" - - "0" - - "0" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: node_collector_update_node_health_duration_seconds + - description: '[ALPHA] Duration in seconds for NodeController to update the health of all nodes.' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "28" - "0" - "0" - "0" @@ -2017,148 +2266,197 @@ resourceMetrics: - "0" - "0" - "0" + count: "28" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - 0.01 - - 0.1 - - 1 - - 10 + - 0.04 + - 0.16 + - 0.64 + - 2.56 + - 10.24 + - 40.96 + - 163.84 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.0037354729999999987 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: endpoint - bucketCounts: + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: node_collector_update_all_nodes_health_duration_seconds + - description: '[ALPHA] Number of endpoints removed on each Service sync' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "19" - "0" - "0" - "0" - - "12" - "0" - "0" - "0" - - "2" - "0" - "0" - "0" - count: "14" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "19" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 
1024 + - 2048 + - 4096 + - 8192 + - 16384 + - 32768 startTimeUnixNano: "1000000" - sum: 0.051047587 + sum: 0 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: endpoint_slice_controller_endpoints_removed_per_sync + - description: '[ALPHA] Client side rate limiter latency in seconds. Broken down by verb, and host.' + histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: name + - key: host value: - stringValue: endpoint_slice + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: GET bucketCounts: + - "188" - "0" + - "58" + - "6" + - "8" + - "2" - "0" - "0" - - "13" - - "3" - "0" - "0" - - "2" - "0" - "0" - "0" - count: "18" + count: "262" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0.14803129499999998 + sum: 8.110631165000001 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: endpoint_slice_mirroring + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PATCH bucketCounts: + - "14" - "0" - "0" - "0" - - "8" - - "1" - "0" - "0" - - "1" - "0" - "0" - "0" - count: "10" + - "0" + - "0" + - "0" + - "0" + count: "14" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0.024042234 + sum: 1.7074999999999998e-05 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: ephemeral_volume + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: POST bucketCounts: + - "67" - "0" + - "26" + - "7" + - "1" - "0" - "0" - "0" - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - explicitBounds: - - 1e-08 - - 1e-07 
- - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - "0" + - "0" + - "0" + - "0" + count: "101" + explicitBounds: + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 3.209349308 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: garbage_collector_attempt_to_delete + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PUT bucketCounts: + - "151" - "0" - "0" - "0" @@ -2167,29 +2465,41 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" - "0" - count: "1" + - "0" + - "0" + count: "151" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0.667118648 + sum: 0.0002683499999999999 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: garbage_collector_attempt_to_orphan - bucketCounts: + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: rest_client_rate_limiter_duration_seconds + - description: '[ALPHA] Latencies in seconds of data encryption key(DEK) generation operations.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" - "0" - "0" - "0" @@ -2202,36 +2512,49 @@ resourceMetrics: - "0" - "0" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 5e-06 + - 1e-05 + - 2e-05 + - 4e-05 + - 8e-05 + - 0.00016 + - 0.00032 + - 0.00064 + - 0.00128 + - 0.00256 + - 0.00512 + - 0.01024 + - 0.02048 + - 0.04096 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: apiserver_storage_data_key_generation_duration_seconds + - description: '[ALPHA] How long in seconds processing an item from workqueue takes.' + histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - key: name value: - stringValue: garbage_collector_graph_changes + stringValue: ClusterRoleAggregator bucketCounts: - "0" - "0" - - "5" - - "446" - - "175" - - "28" - "0" - "0" + - "2" + - "47" + - "12" - "0" + - "3" - "0" - "0" - count: "654" + count: "64" explicitBounds: - 1e-08 - 1e-07 @@ -2244,24 +2567,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.013817855999999998 + sum: 1.7265888939999998 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: horizontalpodautoscaler + stringValue: DynamicCABundle-client-ca-bundle bucketCounts: - "0" - "0" - "0" - "0" + - "1" + - "1" - "0" - "0" - "0" - "0" - "0" - - "0" - - "0" + count: "2" explicitBounds: - 1e-08 - 1e-07 @@ -2274,24 +2598,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.000248688 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: job + stringValue: DynamicCABundle-csr-controller bucketCounts: - "0" - "0" - "0" - "0" - "0" + - "4" - "0" - "0" - "0" - "0" - "0" - - "0" + count: "4" explicitBounds: - 1e-08 - 1e-07 @@ -2304,24 +2629,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 
0.00222831 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: job_orphan_pod + stringValue: DynamicCABundle-request-header bucketCounts: - "0" - "0" - "0" - "0" - "0" + - "1" - "0" - "0" - "0" - "0" - "0" - - "0" + count: "1" explicitBounds: - 1e-08 - 1e-07 @@ -2334,24 +2660,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.00017005 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: namespace + stringValue: DynamicServingCertificateController bucketCounts: - "0" - "0" - "0" - "0" + - "3" - "0" - "0" - "0" - "0" - "0" - "0" - - "0" + count: "3" explicitBounds: - 1e-08 - 1e-07 @@ -2364,24 +2691,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 6.6034e-05 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: node + stringValue: bootstrap_signer_queue bucketCounts: - "0" - "0" - "0" - "0" + - "1" - "0" - "0" + - "1" - "0" - "0" - "0" - - "0" - - "0" + count: "2" explicitBounds: - 1e-08 - 1e-07 @@ -2394,25 +2722,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.013977604000000001 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: node_lifecycle_controller + stringValue: certificate bucketCounts: - "0" - "0" - "0" - - "5" + - "20" + - "6" + - "2" - "1" - - "0" - - "0" - "1" - "0" - "0" - "0" - count: "7" + count: "30" explicitBounds: - 1e-08 - 1e-07 @@ -2425,25 +2753,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.09081987100000002 + sum: 0.023804006000000003 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: node_lifecycle_controller_pods + stringValue: claims bucketCounts: - "0" - "0" - "0" - - "1" - - "6" - - "2" - "0" - "0" - - "4" - "0" - "0" - count: "13" + - "0" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2456,12 +2783,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.41269182300000007 + sum: 0 timeUnixNano: "1000000" - 
attributes: - key: name value: - stringValue: noexec_taint_node + stringValue: cronjob bucketCounts: - "0" - "0" @@ -2470,11 +2797,10 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - count: "1" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2487,25 +2813,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.090718788 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: noexec_taint_pod + stringValue: daemonset bucketCounts: - "0" - "0" - "0" - - "18" - - "0" - - "0" - "0" - "0" + - "8" + - "3" - "4" + - "2" - "0" - "0" - count: "22" + count: "17" explicitBounds: - 1e-08 - 1e-07 @@ -2518,24 +2844,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.40950471899999996 + sum: 1.3178790480000002 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: orphaned_pods_nodes + stringValue: deployment bucketCounts: - "0" - "0" - "0" - "0" - "0" + - "16" + - "15" + - "8" + - "2" - "0" - "0" - - "0" - - "0" - - "0" - - "0" + count: "41" explicitBounds: - 1e-08 - 1e-07 @@ -2548,12 +2875,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 1.454848659 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: pvcprotection + stringValue: disruption bucketCounts: - "0" - "0" @@ -2583,7 +2910,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: pvcs + stringValue: disruption_recheck bucketCounts: - "0" - "0" @@ -2613,19 +2940,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: pvprotection + stringValue: endpoint bucketCounts: - "0" - "0" - "0" - "0" + - "5" - "0" + - "8" - "0" + - "1" - "0" - "0" - - "0" - - "0" - - "0" + count: "14" explicitBounds: - 1e-08 - 1e-07 @@ -2638,25 +2966,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.5369967590000002 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: replicaset + stringValue: endpoint_slice bucketCounts: - "0" - "0" - "0" 
- - "25" - - "2" - - "4" - - "2" - - "5" - "0" + - "2" + - "9" + - "7" + - "1" + - "1" - "0" - "0" - count: "38" + count: "20" explicitBounds: - 1e-08 - 1e-07 @@ -2669,24 +2997,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.09579040300000001 + sum: 0.599190747 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: replicationmanager + stringValue: endpoint_slice_mirroring bucketCounts: - "0" - "0" - "0" - "0" + - "9" + - "1" - "0" - "0" - "0" - "0" - "0" - - "0" - - "0" + count: "10" explicitBounds: - 1e-08 - 1e-07 @@ -2699,12 +3028,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.00031155599999999994 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: resource_quota_controller_resource_changes + stringValue: ephemeral_volume bucketCounts: - "0" - "0" @@ -2734,19 +3063,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: resourcequota_primary + stringValue: garbage_collector_attempt_to_delete bucketCounts: - "0" - "0" - "0" + - "1" - "0" - "0" + - "1" - "0" - "0" - "0" - "0" - - "0" - - "0" + count: "2" explicitBounds: - 1e-08 - 1e-07 @@ -2759,12 +3089,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.003714698 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: resourcequota_priority + stringValue: garbage_collector_attempt_to_orphan bucketCounts: - "0" - "0" @@ -2794,20 +3124,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: root_ca_cert_publisher + stringValue: garbage_collector_graph_changes bucketCounts: - "0" - "0" - "0" - - "1" + - "571" + - "84" + - "4" - "0" - "0" - "0" - - "1" - - "4" - "0" - "0" - count: "6" + count: "659" explicitBounds: - 1e-08 - 1e-07 @@ -2820,12 +3150,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 2.8579039539999997 + sum: 0.006495644000000005 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: service + stringValue: 
horizontalpodautoscaler bucketCounts: - "0" - "0" @@ -2855,20 +3185,19 @@ resourceMetrics: - attributes: - key: name value: - stringValue: serviceaccount + stringValue: job bucketCounts: - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - - "1" - - "4" - "0" - "0" - count: "6" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2881,12 +3210,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.810668624 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: serviceaccount_tokens_secret + stringValue: job_orphan_pod bucketCounts: - "0" - "0" @@ -2916,20 +3245,19 @@ resourceMetrics: - attributes: - key: name value: - stringValue: serviceaccount_tokens_service + stringValue: namespace bucketCounts: - "0" - "0" - "0" - - "37" - - "1" - - "1" - "0" - - "7" - "0" - "0" - "0" - count: "46" + - "0" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2942,12 +3270,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.5948701399999999 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: stale_pod_disruption + stringValue: node bucketCounts: - "0" - "0" @@ -2977,19 +3305,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: statefulset + stringValue: node_lifecycle_controller bucketCounts: - "0" - "0" - "0" + - "4" + - "1" - "0" + - "1" - "0" - "0" - "0" - "0" - - "0" - - "0" - - "0" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -3002,25 +3331,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.007181934 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: token_cleaner + stringValue: node_lifecycle_controller_pods bucketCounts: - "0" - "0" - "0" + - "3" + - "10" - "0" - "0" - - "1" - - "0" - "0" - "0" - "0" - "0" - count: "1" + count: "13" explicitBounds: - 1e-08 - 1e-07 @@ -3033,24 +3362,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000197849 + sum: 0.0002907 timeUnixNano: "1000000" - 
attributes: - key: name value: - stringValue: ttl_jobs_to_delete + stringValue: noexec_taint_node bucketCounts: - "0" - "0" - "0" - "0" + - "1" - "0" - "0" - "0" - "0" - "0" - "0" - - "0" + count: "1" explicitBounds: - 1e-08 - 1e-07 @@ -3063,25 +3393,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 1.607e-05 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: ttlcontroller + stringValue: noexec_taint_pod bucketCounts: - "0" - "0" - "0" + - "18" - "4" - "0" - "0" - "0" - - "1" - - "1" - "0" - "0" - count: "6" + - "0" + count: "22" explicitBounds: - 1e-08 - 1e-07 @@ -3094,12 +3424,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.761797873 + sum: 0.00018095 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: volume_expand + stringValue: orphaned_pods_nodes bucketCounts: - "0" - "0" @@ -3129,7 +3459,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: volumes + stringValue: pvcprotection bucketCounts: - "0" - "0" @@ -3156,97 +3486,44 @@ resourceMetrics: startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: workqueue_queue_duration_seconds - - description: '[ALPHA] Number of endpoints added on each Service sync' - histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: clusterCIDR + - key: name value: - stringValue: 10.244.0.0/16 + stringValue: pvcs bucketCounts: - - "1" - "0" - "0" - "0" - "0" - "0" - count: "1" - explicitBounds: - - 1 - - 5 - - 25 - - 125 - - 625 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: node_ipam_controller_cidrset_allocation_tries_per_request - - description: '[ALPHA] Request latency in seconds. Broken down by verb, and host.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb - value: - stringValue: GET - bucketCounts: - - "145" - - "36" - - "51" - - "7" - - "10" - - "2" - "0" - - "1" - "0" - "0" - "0" - "0" - "0" - count: "252" explicitBounds: - - 0.005 - - 0.025 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.25 - - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 10 startTimeUnixNano: "1000000" - sum: 11.670188344999998 + sum: 0 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: PATCH + stringValue: pvprotection bucketCounts: - - "1" - - "9" - "0" - "0" - "0" - - "4" - "0" - "0" - "0" @@ -3254,113 +3531,57 @@ resourceMetrics: - "0" - "0" - "0" - count: "14" + - "0" explicitBounds: - - 0.005 - - 0.025 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.25 - - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 10 startTimeUnixNano: "1000000" - sum: 2.957201648 + sum: 0 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: POST + stringValue: replicaset bucketCounts: - - "42" - - "22" - - "19" - - "6" - - "9" - - "7" - - "0" - "0" - "0" - "0" - "0" + - "17" + - "4" + - "7" + - "9" + - "2" - "0" - "0" - count: "105" + count: "39" explicitBounds: - - 0.005 - - 0.025 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.25 - - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 10 startTimeUnixNano: "1000000" - sum: 9.74875719 + sum: 0.5417694160000001 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: PUT + stringValue: replicationmanager bucketCounts: - 
- "80" - - "61" - - "2" - - "1" - - "1" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "145" - explicitBounds: - - 0.005 - - 0.025 - - 0.1 - - 0.25 - - 0.5 - - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 - startTimeUnixNano: "1000000" - sum: 1.2593861500000003 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: rest_client_request_duration_seconds - - description: '[ALPHA] The time it took to delete the job since it became eligible for deletion' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" - - "0" - "0" - "0" - "0" @@ -3373,75 +3594,58 @@ resourceMetrics: - "0" - "0" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.2 - - 0.4 - - 0.8 - - 1.6 - - 3.2 - - 6.4 - - 12.8 - - 25.6 - - 51.2 - - 102.4 - - 204.8 - - 409.6 - - 819.2 + - 1 + - 10 startTimeUnixNano: "1000000" sum: 0 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: ttl_after_finished_controller_job_deletion_duration_seconds - - description: '[ALPHA] Number of EndpointSlices changed on each Service sync' - histogram: - aggregationTemporality: 2 - dataPoints: + timeUnixNano: "1000000" - attributes: - - key: topology + - key: name value: - stringValue: Disabled + stringValue: resource_quota_controller_resource_changes bucketCounts: - - "8" - "0" - "0" - "0" - "0" - "0" - "0" - - "9" - "0" - "0" - "0" - "0" - count: "17" + - "0" explicitBounds: - - 0.005 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 - 0.01 - - 0.025 - - 0.05 - 0.1 - - 0.25 - - 0.5 - 1 - - 2.5 - - 5 - 10 startTimeUnixNano: "1000000" - sum: 9 + sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: endpoint_slice_controller_endpointslices_changed_per_sync - - description: '[ALPHA] The ratio of 
chosen deleted pod''s ages to the current youngest pod''s age (at the time). Should be <2.The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate''s effect onthe sorting (and deletion) of pods when a replicaset scales down. This only considers Ready pods when calculating and reporting.' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: + - attributes: + - key: name + value: + stringValue: resourcequota_primary + bucketCounts: + - "0" + - "0" + - "0" + - "0" - "0" - "0" - "0" @@ -3450,38 +3654,24 @@ resourceMetrics: - "0" - "0" explicitBounds: - - 0.25 - - 0.5 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 - 1 - - 2 - - 4 - - 8 + - 10 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: replicaset_controller_sorting_deletion_age_ratio - - description: '[ALPHA] Client side rate limiter latency in seconds. Broken down by verb, and host.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: GET + stringValue: resourcequota_priority bucketCounts: - - "183" - - "1" - - "51" - - "5" - - "10" - - "2" - "0" - "0" - "0" @@ -3489,74 +3679,61 @@ resourceMetrics: - "0" - "0" - "0" - count: "252" + - "0" + - "0" + - "0" + - "0" explicitBounds: - - 0.005 - - 0.025 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.25 - - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 10 startTimeUnixNano: "1000000" - sum: 8.264814277000001 + sum: 0 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: PATCH + stringValue: root_ca_cert_publisher bucketCounts: - - "14" - - "0" - - "0" - - "0" - "0" - "0" - "0" - "0" - "0" - "0" + - "5" - "0" + - "1" - "0" - "0" - count: "14" + count: "6" explicitBounds: - - 0.005 - - 0.025 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.25 - - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 10 startTimeUnixNano: "1000000" - sum: 4.3953000000000004e-05 + sum: 0.36354317399999997 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: POST + stringValue: service bucketCounts: - - "74" - "0" - - "22" - - "2" - - "7" - "0" - "0" - "0" @@ -3565,76 +3742,58 @@ resourceMetrics: - "0" - "0" - "0" - count: "105" + - "0" + - "0" explicitBounds: - - 0.005 - - 0.025 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.25 - - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 10 startTimeUnixNano: "1000000" - sum: 3.9698658590000013 + sum: 0 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 
172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: PUT + stringValue: serviceaccount bucketCounts: - - "145" - - "0" - - "0" - - "0" - "0" - "0" - "0" - "0" - "0" - "0" + - "5" - "0" + - "1" - "0" - "0" - count: "145" + count: "6" explicitBounds: - - 0.005 - - 0.025 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.25 - - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 10 startTimeUnixNano: "1000000" - sum: 0.000205694 + sum: 0.313513933 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: rest_client_rate_limiter_duration_seconds - - description: '[ALPHA] Duration of syncEndpoints() in seconds' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "10" - - "0" - - "0" - - "0" - - "0" + - attributes: + - key: name + value: + stringValue: serviceaccount_tokens_secret + bucketCounts: - "0" - "0" - "0" @@ -3646,79 +3805,57 @@ resourceMetrics: - "0" - "0" - "0" - count: "10" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - - 16.384 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: endpoint_slice_mirroring_controller_endpoints_sync_duration - - description: '[ALPHA] Histogram of the number of seconds the last auth exec plugin client certificate lived before being rotated. If auth exec plugin client certificates are unused, histogram will contain no data.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" + - 0.01 + - 0.1 + - 1 + - 10 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + - attributes: + - key: name + value: + stringValue: serviceaccount_tokens_service + bucketCounts: - "0" - "0" - "0" + - "45" + - "1" - "0" - "0" - "0" - "0" - "0" - "0" + count: "46" explicitBounds: - - 600 - - 1800 - - 3600 - - 14400 - - 86400 - - 604800 - - 2.592e+06 - - 7.776e+06 - - 1.5552e+07 - - 3.1104e+07 - - 1.24416e+08 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.00029685000000000003 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: rest_client_exec_plugin_certificate_rotation_age - - description: '[ALPHA] ' - histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: status + - key: name value: - stringValue: miss + stringValue: stale_pod_disruption bucketCounts: - "0" - - "1" - "0" - "0" - "0" @@ -3729,37 +3866,25 @@ resourceMetrics: - "0" - "0" - "0" - count: "1" explicitBounds: - - 0.005 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 - 0.01 - - 0.025 - - 0.05 - 0.1 - - 0.25 - - 0.5 - 1 - - 2.5 - - 5 - 10 startTimeUnixNano: "1000000" - sum: 0.008 + sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: authentication_token_cache_request_duration_seconds - - description: '[ALPHA] Request latency in seconds. Broken down by status code.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: code + - key: name value: - stringValue: "201" + stringValue: statefulset bucketCounts: - - "1" - "0" - "0" - "0" @@ -3768,96 +3893,62 @@ resourceMetrics: - "0" - "0" - "0" - count: "1" + - "0" + - "0" + - "0" explicitBounds: - - 0.25 - - 0.5 - - 0.7 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 - 1 - - 1.5 - - 3 - - 5 - 10 startTimeUnixNano: "1000000" - sum: 0.001436595 + sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: apiserver_delegated_authz_request_duration_seconds - - description: '[ALPHA] Request latency in seconds. Broken down by status code.' - histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: code + - key: name value: - stringValue: "201" + stringValue: token_cleaner bucketCounts: - - "1" - "0" - "0" - "0" - "0" + - "1" + - "0" + - "0" - "0" - "0" - "0" - "0" count: "1" explicitBounds: - - 0.25 - - 0.5 - - 0.7 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 - 1 - - 1.5 - - 3 - - 5 - 10 startTimeUnixNano: "1000000" - sum: 0.008154843 + sum: 1.9968e-05 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: apiserver_delegated_authn_request_duration_seconds - - description: Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "946" - - "620" - - "2339" - - "1267" - - "225" - - "20" + - attributes: + - key: name + value: + stringValue: ttl_jobs_to_delete + bucketCounts: + - "0" - "0" - "0" - count: "5417" - explicitBounds: - - 6.399999999999999e-08 - - 6.399999999999999e-07 - - 7.167999999999999e-06 - - 8.191999999999999e-05 - - 0.0009175039999999999 - - 0.010485759999999998 - - 0.11744051199999998 - startTimeUnixNano: "1000000" - sum: 0.047400576 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: go_sched_latencies_seconds - - description: '[ALPHA] Duration in seconds for NodeController to update the health of all nodes.' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "27" - "0" - "0" - "0" @@ -3866,34 +3957,56 @@ resourceMetrics: - "0" - "0" - "0" - count: "27" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 - 0.01 - - 0.04 - - 0.16 - - 0.64 - - 2.56 - - 10.24 - - 40.96 - - 163.84 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 0.00313746 + sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: node_collector_update_all_nodes_health_duration_seconds - - description: '[ALPHA] A metric measuring the latency for nodesync which updates loadbalancer hosts on cluster node updates.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: + - attributes: + - key: name + value: + stringValue: ttlcontroller + bucketCounts: + - "0" + - "0" + - "0" + - "4" + - "1" - "0" - "0" - "0" + - "1" - "0" - "0" + count: "6" + explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 + startTimeUnixNano: "1000000" + sum: 0.2581896470000001 + timeUnixNano: "1000000" + - attributes: + - key: name + value: + stringValue: volume_expand + bucketCounts: - "0" - "0" - "0" @@ -3906,34 +4019,24 @@ resourceMetrics: - "0" - "0" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 - 1 - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 + - 10 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: service_controller_nodesync_latency_seconds - - description: '[STABLE] Time between when a cronjob is scheduled to be run, and when the corresponding job is created' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: + - attributes: + - key: name + value: + stringValue: volumes + bucketCounts: - "0" - "0" - "0" @@ -3946,16 +4049,16 @@ resourceMetrics: - "0" - "0" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 - 1 - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 + - 10 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -3963,48 +4066,18 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: cronjob_controller_job_creation_skew_duration_seconds - - description: Distribution individual GC-related stop-the-world pause latencies. 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "6" - - "13" - - "5" - - "0" - - "0" - - "0" - count: "24" - explicitBounds: - - 6.399999999999999e-08 - - 6.399999999999999e-07 - - 7.167999999999999e-06 - - 8.191999999999999e-05 - - 0.0009175039999999999 - - 0.010485759999999998 - - 0.11744051199999998 - startTimeUnixNano: "1000000" - sum: 0.000506624 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: go_gc_pauses_seconds - - description: '[ALPHA] Number of endpoints removed on each Service sync' + name: workqueue_work_duration_seconds + - description: '[ALPHA] ' histogram: aggregationTemporality: 2 dataPoints: - - bucketCounts: - - "17" - - "0" - - "0" - - "0" - - "0" + - attributes: + - key: status + value: + stringValue: miss + bucketCounts: - "0" + - "1" - "0" - "0" - "0" @@ -4015,256 +4088,164 @@ resourceMetrics: - "0" - "0" - "0" - count: "17" - explicitBounds: - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 - - 32768 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: endpoint_slice_controller_endpoints_removed_per_sync - - description: Distribution of freed heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "8018" - - "235130" - - "119079" - - "91054" - - "38524" - - "12602" - - "4483" - - "1730" - - "1484" - - "394" - - "86" - - "86" - count: "512670" + count: "1" explicitBounds: - - 8.999999999999998 - - 24.999999999999996 - - 64.99999999999999 - - 144.99999999999997 - - 320.99999999999994 - - 704.9999999999999 - - 1536.9999999999998 - - 3200.9999999999995 - - 6528.999999999999 - - 13568.999999999998 - - 27264.999999999996 + - 0.005 + - 0.01 + - 0.025 + - 0.05 + - 0.1 + - 0.25 + - 0.5 + - 1 + - 2.5 + - 5 + - 10 startTimeUnixNano: "1000000" - sum: 6.2123944e+07 + sum: 0.008 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: go_gc_heap_frees_by_size_bytes - - description: '[ALPHA] Number of namespace syncs happened in root ca cert publisher.' + name: authentication_token_cache_request_duration_seconds + - description: '[ALPHA] Number of EndpointSlices changed on each Service sync' histogram: aggregationTemporality: 2 dataPoints: - attributes: - - key: code + - key: topology value: - stringValue: "200" + stringValue: Disabled bucketCounts: - - "0" - - "2" - - "3" - - "0" - - "0" + - "10" - "0" - "0" - "0" - "0" - "0" - - "1" - "0" + - "9" - "0" - "0" - "0" - "0" - count: "6" + count: "19" explicitBounds: - - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - - 16.384 + - 0.005 + - 0.01 + - 0.025 + - 0.05 + - 0.1 + - 0.25 + - 0.5 + - 1 + - 2.5 + - 5 + - 10 startTimeUnixNano: "1000000" - sum: 0.617357512 + sum: 9 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: root_ca_cert_publisher_sync_duration_seconds - - description: '[ALPHA] Latencies in seconds of data encryption key(DEK) generation operations.' 
+ name: endpoint_slice_controller_endpointslices_changed_per_sync + - description: Distribution individual GC-related stop-the-world pause latencies. histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - "0" - "0" + - "7" + - "14" + - "5" - "0" - "0" - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" + count: "26" explicitBounds: - - 5e-06 - - 1e-05 - - 2e-05 - - 4e-05 - - 8e-05 - - 0.00016 - - 0.00032 - - 0.00064 - - 0.00128 - - 0.00256 - - 0.00512 - - 0.01024 - - 0.02048 - - 0.04096 + - 6.399999999999999e-08 + - 6.399999999999999e-07 + - 7.167999999999999e-06 + - 8.191999999999999e-05 + - 0.0009175039999999999 + - 0.010485759999999998 + - 0.11744051199999998 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.000514432 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: apiserver_storage_data_key_generation_duration_seconds - - description: '[ALPHA] Authentication duration in seconds broken out by result.' + name: go_gc_pauses_seconds + - description: Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. 
histogram: aggregationTemporality: 2 dataPoints: - - attributes: - - key: result - value: - stringValue: success - bucketCounts: - - "14" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" + - bucketCounts: + - "1260" + - "669" + - "2366" + - "1450" + - "178" + - "19" - "0" - "0" - count: "14" + count: "5942" explicitBounds: - - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - - 16.384 + - 6.399999999999999e-08 + - 6.399999999999999e-07 + - 7.167999999999999e-06 + - 8.191999999999999e-05 + - 0.0009175039999999999 + - 0.010485759999999998 + - 0.11744051199999998 startTimeUnixNano: "1000000" - sum: 0.00035745899999999997 + sum: 0.043964991999999994 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: authentication_duration_seconds - - description: Distribution of heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
+ name: go_sched_latencies_seconds + - description: '[ALPHA] Number of endpoints added on each Service sync' histogram: aggregationTemporality: 2 dataPoints: - - bucketCounts: - - "11945" - - "263754" - - "147118" - - "103966" - - "45547" - - "15535" - - "5034" - - "2096" - - "1619" - - "526" - - "289" - - "191" - count: "597620" + - attributes: + - key: clusterCIDR + value: + stringValue: 10.244.0.0/16 + bucketCounts: + - "1" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "1" explicitBounds: - - 8.999999999999998 - - 24.999999999999996 - - 64.99999999999999 - - 144.99999999999997 - - 320.99999999999994 - - 704.9999999999999 - - 1536.9999999999998 - - 3200.9999999999995 - - 6528.999999999999 - - 13568.999999999998 - - 27264.999999999996 + - 1 + - 5 + - 25 + - 125 + - 625 startTimeUnixNano: "1000000" - sum: 8.0416816e+07 + sum: 0 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: go_gc_heap_allocs_by_size_bytes - - description: '[ALPHA] Duration in seconds for NodeController to update the health of a single node.' + name: node_ipam_controller_cidrset_allocation_tries_per_request + - description: '[ALPHA] A metric measuring the latency for updating each load balancer hosts.' histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - - "27" - "0" - "0" - "0" @@ -4273,24 +4254,38 @@ resourceMetrics: - "0" - "0" - "0" - count: "27" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" explicitBounds: - - 0.001 - - 0.004 - - 0.016 - - 0.064 - - 0.256 - - 1.024 - - 4.096 - - 16.384 + - 1 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 startTimeUnixNano: "1000000" - sum: 0.001964755 + sum: 0 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: node_collector_update_node_health_duration_seconds + name: service_controller_update_loadbalancer_host_latency_seconds - description: '[ALPHA] Request size in bytes. 
Broken down by verb and host.' histogram: aggregationTemporality: 2 @@ -4303,7 +4298,7 @@ resourceMetrics: value: stringValue: GET bucketCounts: - - "252" + - "262" - "0" - "0" - "0" @@ -4315,7 +4310,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "252" + count: "262" explicitBounds: - 64 - 256 @@ -4377,7 +4372,7 @@ resourceMetrics: bucketCounts: - "1" - "54" - - "23" + - "19" - "7" - "19" - "1" @@ -4387,7 +4382,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "105" + count: "101" explicitBounds: - 64 - 256 @@ -4401,7 +4396,7 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 50827 + sum: 49733 timeUnixNano: "1000000" - attributes: - key: host @@ -4413,17 +4408,17 @@ resourceMetrics: bucketCounts: - "0" - "0" - - "72" + - "77" - "9" - - "38" - - "26" + - "43" + - "22" - "0" - "0" - "0" - "0" - "0" - "0" - count: "145" + count: "151" explicitBounds: - 64 - 256 @@ -4437,13 +4432,106 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 310831 + sum: 305159 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram name: rest_client_request_size_bytes + - description: '[ALPHA] Number of namespace syncs happened in root ca cert publisher.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - attributes: + - key: code + value: + stringValue: "200" + bucketCounts: + - "0" + - "1" + - "4" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "1" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "6" + explicitBounds: + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + - 16.384 + startTimeUnixNano: "1000000" + sum: 0.36350212499999995 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: root_ca_cert_publisher_sync_duration_seconds + - description: '[ALPHA] Distribution of the remaining lifetime on the certificate used to authenticate a request.' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: + - 0 + - 1800 + - 3600 + - 7200 + - 21600 + - 43200 + - 86400 + - 172800 + - 345600 + - 604800 + - 2.592e+06 + - 7.776e+06 + - 1.5552e+07 + - 3.1104e+07 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: apiserver_client_certificate_expiration_seconds - description: '[ALPHA] Response size in bytes. Broken down by verb and host.' 
histogram: aggregationTemporality: 2 @@ -4456,11 +4544,11 @@ resourceMetrics: value: stringValue: GET bucketCounts: - - "19" - - "71" - - "107" + - "21" + - "70" + - "112" - "4" - - "10" + - "14" - "29" - "12" - "0" @@ -4468,7 +4556,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "252" + count: "262" explicitBounds: - 64 - 256 @@ -4482,7 +4570,7 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 610661 + sum: 625980 timeUnixNano: "1000000" - attributes: - key: host @@ -4531,7 +4619,7 @@ resourceMetrics: - "0" - "38" - "1" - - "24" + - "20" - "40" - "2" - "0" @@ -4540,7 +4628,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "105" + count: "101" explicitBounds: - 64 - 256 @@ -4554,7 +4642,7 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 113851 + sum: 111762 timeUnixNano: "1000000" - attributes: - key: host @@ -4566,17 +4654,17 @@ resourceMetrics: bucketCounts: - "0" - "4" - - "78" + - "85" - "6" - - "34" - - "23" + - "36" + - "20" - "0" - "0" - "0" - "0" - "0" - "0" - count: "145" + count: "151" explicitBounds: - 64 - 256 @@ -4590,101 +4678,13 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 272098 + sum: 266643 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram name: rest_client_response_size_bytes - - description: '[ALPHA] A metric measuring the latency for updating each load balancer hosts.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - explicitBounds: - - 1 - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: service_controller_update_loadbalancer_host_latency_seconds - - description: '[ALPHA] Distribution of the remaining lifetime on the certificate used to authenticate a request.' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - explicitBounds: - - 0 - - 1800 - - 3600 - - 7200 - - 21600 - - 43200 - - 86400 - - 172800 - - 345600 - - 604800 - - 2.592e+06 - - 7.776e+06 - - 1.5552e+07 - - 3.1104e+07 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: apiserver_client_certificate_expiration_seconds scope: - name: otelcol/prometheusreceiver - version: v0.105.0 + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver + version: v0.110.0 diff --git a/functional_tests/testdata_histogram/expected/v1.27/coredns_metrics.yaml b/functional_tests/testdata_histogram/expected/v1.27/coredns_metrics.yaml index 74ac55a80..e3e4816ca 100644 --- a/functional_tests/testdata_histogram/expected/v1.27/coredns_metrics.yaml +++ b/functional_tests/testdata_histogram/expected/v1.27/coredns_metrics.yaml @@ -18,13 +18,13 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5d78c9869d-z6h45 + stringValue: coredns-5d78c9869d-bf46z - key: k8s.pod.uid value: - stringValue: a5be27cc-3758-4d35-b4f9-aa7f36c08fd5 + 
stringValue: 945facdb-fb0b-4b0e-ae8c-a98159c6a7b4 - key: net.host.name value: - stringValue: 10.244.0.4 + stringValue: 10.244.0.3 - key: net.host.port value: stringValue: "9153" @@ -33,13 +33,13 @@ resourceMetrics: stringValue: linux - key: server.address value: - stringValue: 10.244.0.4 + stringValue: 10.244.0.3 - key: server.port value: stringValue: "9153" - key: service.instance.id value: - stringValue: 10.244.0.4:9153 + stringValue: 10.244.0.3:9153 - key: service.name value: stringValue: coredns @@ -65,8 +65,8 @@ resourceMetrics: stringValue: . bucketCounts: - "0" + - "25" - "5" - - "1" - "0" - "0" - "0" @@ -79,7 +79,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "6" + count: "30" explicitBounds: - 0 - 100 @@ -96,7 +96,7 @@ resourceMetrics: - 48000 - 64000 startTimeUnixNano: "1000000" - sum: 452 + sum: 2255 timeUnixNano: "1000000" metadata: - key: prometheus.type @@ -115,24 +115,24 @@ resourceMetrics: value: stringValue: . bucketCounts: - - "4" - - "0" + - "20" - "0" - "0" - "0" - "1" + - "5" + - "3" + - "1" - "0" - "0" - "0" - - "1" - - "0" - "0" - "0" - "0" - "0" - "0" - "0" - count: "6" + count: "30" explicitBounds: - 0.00025 - 0.0005 @@ -151,39 +151,67 @@ resourceMetrics: - 4.096 - 8.192 startTimeUnixNano: "1000000" - sum: 0.078521228 + sum: 0.09931596000000001 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram name: coredns_dns_request_duration_seconds - - description: Histogram of the time (in seconds) each request took. + - description: Size of the returned response in bytes. histogram: aggregationTemporality: 2 dataPoints: - - bucketCounts: + - attributes: + - key: proto + value: + stringValue: udp + - key: server + value: + stringValue: dns://:53 + - key: zone + value: + stringValue: . 
+ bucketCounts: + - "0" + - "0" + - "25" + - "5" + - "0" - "0" - - "128" - "0" - "0" - "0" - "0" - count: "128" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "30" explicitBounds: - - 0.00025 - - 0.0025 - - 0.025 - - 0.25 - - 2.5 + - 0 + - 100 + - 200 + - 300 + - 400 + - 511 + - 1023 + - 2047 + - 4095 + - 8291 + - 16000 + - 32000 + - 48000 + - 64000 startTimeUnixNano: "1000000" - sum: 0.05456532400000002 + sum: 4811 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: coredns_health_request_duration_seconds + name: coredns_dns_response_size_bytes - description: Histogram of the time each request took. histogram: aggregationTemporality: 2 @@ -191,7 +219,7 @@ resourceMetrics: - attributes: - key: rcode value: - stringValue: NXDOMAIN + stringValue: NOERROR - key: to value: stringValue: 172.18.0.1:53 @@ -200,12 +228,12 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - "1" - "0" + - "3" + - "0" - "0" - "0" - - "1" - "0" - "0" - "0" @@ -213,7 +241,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "2" + count: "4" explicitBounds: - 0.00025 - 0.0005 @@ -232,31 +260,23 @@ resourceMetrics: - 4.096 - 8.192 startTimeUnixNano: "1000000" - sum: 0.077749352 + sum: 0.035429310000000006 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: coredns_forward_request_duration_seconds - - description: Size of the returned response in bytes. - histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: proto - value: - stringValue: udp - - key: server + - key: rcode value: - stringValue: dns://:53 - - key: zone + stringValue: NXDOMAIN + - key: to value: - stringValue: . 
+ stringValue: 172.18.0.1:53 bucketCounts: + - "0" + - "0" + - "0" - "0" - "0" - "5" + - "0" - "1" - "0" - "0" @@ -267,32 +287,58 @@ resourceMetrics: - "0" - "0" - "0" + count: "6" + explicitBounds: + - 0.00025 + - 0.0005 + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + startTimeUnixNano: "1000000" + sum: 0.060914834 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: coredns_forward_request_duration_seconds + - description: Histogram of the time (in seconds) each request took. + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: - "0" + - "159" + - "1" - "0" - count: "6" + - "0" + - "0" + count: "160" explicitBounds: - - 0 - - 100 - - 200 - - 300 - - 400 - - 511 - - 1023 - - 2047 - - 4095 - - 8291 - - 16000 - - 32000 - - 48000 - - 64000 + - 0.00025 + - 0.0025 + - 0.025 + - 0.25 + - 2.5 startTimeUnixNano: "1000000" - sum: 954 + sum: 0.08620115599999999 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: coredns_dns_response_size_bytes + name: coredns_health_request_duration_seconds scope: - name: otelcol/prometheusreceiver - version: v0.105.0 + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver + version: v0.110.0 diff --git a/functional_tests/testdata_histogram/expected/v1.28/controller_manager_metrics.yaml b/functional_tests/testdata_histogram/expected/v1.28/controller_manager_metrics.yaml index 924458bf6..454c37b67 100644 --- a/functional_tests/testdata_histogram/expected/v1.28/controller_manager_metrics.yaml +++ b/functional_tests/testdata_histogram/expected/v1.28/controller_manager_metrics.yaml @@ -21,7 +21,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 7cb40a1d-edde-4e3d-83a7-8f693563e886 + stringValue: 429a0a55-7c09-4879-82ef-57f23dbcd0ba - 
key: net.host.name value: stringValue: 172.18.0.2 @@ -49,37 +49,57 @@ resourceMetrics: schemaUrl: https://opentelemetry.io/schemas/1.6.1 scopeMetrics: - metrics: - - description: Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. + - description: '[ALPHA] Authorization duration in seconds broken out by result.' histogram: aggregationTemporality: 2 dataPoints: - - bucketCounts: - - "1270" - - "696" - - "2943" - - "1059" - - "187" - - "34" - - "4" + - attributes: + - key: result + value: + stringValue: allowed + bucketCounts: + - "16" + - "0" + - "0" + - "0" + - "0" + - "0" - "0" - count: "6193" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "16" explicitBounds: - - 6.399999999999999e-08 - - 6.399999999999999e-07 - - 7.167999999999999e-06 - - 8.191999999999999e-05 - - 0.0009175039999999999 - - 0.010485759999999998 - - 0.11744051199999998 + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + - 16.384 startTimeUnixNano: "1000000" - sum: 0.097976192 + sum: 6.4925e-05 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: go_sched_latencies_seconds - - description: '[ALPHA] Request latency in seconds. Broken down by verb, and host.' + name: authorization_duration_seconds + - description: '[ALPHA] Request size in bytes. Broken down by verb and host.' 
histogram: aggregationTemporality: 2 dataPoints: @@ -91,20 +111,173 @@ resourceMetrics: value: stringValue: GET bucketCounts: - - "165" - - "32" - - "39" + - "258" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "258" + explicitBounds: + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + - attributes: + - key: host + value: + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PATCH + bucketCounts: + - "1" + - "2" + - "1" + - "0" + - "7" + - "3" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "14" + explicitBounds: + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 + startTimeUnixNano: "1000000" + sum: 34289 + timeUnixNano: "1000000" + - attributes: + - key: host + value: + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: POST + bucketCounts: + - "1" + - "54" + - "23" - "7" + - "19" + - "1" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "105" + explicitBounds: + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 + startTimeUnixNano: "1000000" + sum: 51348 + timeUnixNano: "1000000" + - attributes: + - key: host + value: + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PUT + bucketCounts: + - "0" + - "0" + - "79" - "10" - - "2" + - "38" + - "25" + - "0" + - "0" - "0" + - "0" + - "0" + - "0" + count: "152" + explicitBounds: + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 + startTimeUnixNano: "1000000" + sum: 308152 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: rest_client_request_size_bytes + - description: '[ALPHA] Request latency in 
seconds. Broken down by verb, and host.' + histogram: + aggregationTemporality: 2 + dataPoints: + - attributes: + - key: host + value: + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: GET + bucketCounts: + - "156" + - "40" + - "39" + - "11" + - "9" + - "2" - "1" - "0" - "0" - "0" - "0" - "0" - count: "256" + - "0" + count: "258" explicitBounds: - 0.005 - 0.025 @@ -119,7 +292,7 @@ resourceMetrics: - 30 - 60 startTimeUnixNano: "1000000" - sum: 10.900202727999993 + sum: 10.252283928999994 timeUnixNano: "1000000" - attributes: - key: host @@ -129,8 +302,8 @@ resourceMetrics: value: stringValue: PATCH bucketCounts: - - "5" - - "7" + - "1" + - "9" - "0" - "0" - "0" @@ -142,7 +315,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "16" + count: "14" explicitBounds: - 0.005 - 0.025 @@ -157,7 +330,7 @@ resourceMetrics: - 30 - 60 startTimeUnixNano: "1000000" - sum: 2.6907728460000007 + sum: 2.9424806329999997 timeUnixNano: "1000000" - attributes: - key: host @@ -167,12 +340,12 @@ resourceMetrics: value: stringValue: POST bucketCounts: - - "43" - - "19" - - "19" - - "3" - - "10" - - "7" + - "51" + - "17" + - "16" + - "2" + - "13" + - "6" - "0" - "0" - "0" @@ -180,7 +353,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "101" + count: "105" explicitBounds: - 0.005 - 0.025 @@ -195,7 +368,7 @@ resourceMetrics: - 30 - 60 startTimeUnixNano: "1000000" - sum: 9.965587482 + sum: 9.986952157999996 timeUnixNano: "1000000" - attributes: - key: host @@ -205,12 +378,12 @@ resourceMetrics: value: stringValue: PUT bucketCounts: - - "99" - - "44" - - "3" - - "0" + - "88" + - "61" + - "1" - "0" - "1" + - "1" - "0" - "0" - "0" @@ -218,7 +391,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "147" + count: "152" explicitBounds: - 0.005 - 0.025 @@ -233,7 +406,7 @@ resourceMetrics: - 30 - 60 startTimeUnixNano: "1000000" - sum: 1.6364586360000006 + sum: 1.7183537939999995 timeUnixNano: "1000000" metadata: - key: prometheus.type @@ -285,15 +458,47 @@ resourceMetrics: value: 
stringValue: histogram name: service_controller_nodesync_latency_seconds - - description: '[ALPHA] The time it took to delete the job since it became eligible for deletion' + - description: '[ALPHA] Request latency in seconds. Broken down by status code.' histogram: aggregationTemporality: 2 dataPoints: - - bucketCounts: + - attributes: + - key: code + value: + stringValue: "201" + bucketCounts: + - "1" + - "0" + - "0" + - "0" + - "0" - "0" - "0" - "0" - "0" + count: "1" + explicitBounds: + - 0.25 + - 0.5 + - 0.7 + - 1 + - 1.5 + - 3 + - 5 + - 10 + startTimeUnixNano: "1000000" + sum: 0.001254768 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: apiserver_delegated_authz_request_duration_seconds + - description: '[STABLE] Time between when a cronjob is scheduled to be run, and when the corresponding job is created' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: - "0" - "0" - "0" @@ -306,20 +511,16 @@ resourceMetrics: - "0" - "0" explicitBounds: - - 0.1 - - 0.2 - - 0.4 - - 0.8 - - 1.6 - - 3.2 - - 6.4 - - 12.8 - - 25.6 - - 51.2 - - 102.4 - - 204.8 - - 409.6 - - 819.2 + - 1 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -327,8 +528,67 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: ttl_after_finished_controller_job_deletion_duration_seconds - - description: '[ALPHA] Request size in bytes. Broken down by verb and host.' + name: cronjob_controller_job_creation_skew_duration_seconds + - description: '[ALPHA] Duration in seconds for NodeController to update the health of all nodes.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "30" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "30" + explicitBounds: + - 0.01 + - 0.04 + - 0.16 + - 0.64 + - 2.56 + - 10.24 + - 40.96 + - 163.84 + startTimeUnixNano: "1000000" + sum: 0.0034431500000000003 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: node_collector_update_all_nodes_health_duration_seconds + - description: '[ALPHA] The ratio of chosen deleted pod''s ages to the current youngest pod''s age (at the time). Should be <2.The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate''s effect onthe sorting (and deletion) of pods when a replicaset scales down. This only considers Ready pods when calculating and reporting.' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: + - 0.25 + - 0.5 + - 1 + - 2 + - 4 + - 8 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: replicaset_controller_sorting_deletion_age_ratio + - description: '[ALPHA] Response size in bytes. Broken down by verb and host.' 
histogram: aggregationTemporality: 2 dataPoints: @@ -340,19 +600,19 @@ resourceMetrics: value: stringValue: GET bucketCounts: - - "256" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" + - "21" + - "70" + - "114" + - "3" + - "9" + - "29" + - "12" - "0" - "0" - "0" - "0" - "0" - count: "256" + count: "258" explicitBounds: - 64 - 256 @@ -366,7 +626,7 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 0 + sum: 611597 timeUnixNano: "1000000" - attributes: - key: host @@ -376,19 +636,19 @@ resourceMetrics: value: stringValue: PATCH bucketCounts: + - "0" + - "0" + - "0" - "1" - - "3" - - "1" + - "13" - "0" - - "7" - - "4" - "0" - "0" - "0" - "0" - "0" - "0" - count: "16" + count: "14" explicitBounds: - 64 - 256 @@ -402,7 +662,7 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 38668 + sum: 42589 timeUnixNano: "1000000" - attributes: - key: host @@ -412,19 +672,19 @@ resourceMetrics: value: stringValue: POST bucketCounts: + - "0" + - "38" - "1" - - "54" - - "19" - - "7" - - "19" - - "1" + - "24" + - "40" + - "2" - "0" - "0" - "0" - "0" - "0" - "0" - count: "101" + count: "105" explicitBounds: - 64 - 256 @@ -438,7 +698,7 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 50499 + sum: 114966 timeUnixNano: "1000000" - attributes: - key: host @@ -449,18 +709,18 @@ resourceMetrics: stringValue: PUT bucketCounts: - "0" - - "0" - - "71" - - "10" - - "43" - - "23" + - "2" + - "85" + - "7" + - "34" + - "24" - "0" - "0" - "0" - "0" - "0" - "0" - count: "147" + count: "152" explicitBounds: - 64 - 256 @@ -474,51 +734,21 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 310768 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: rest_client_request_size_bytes - - description: Distribution individual GC-related stop-the-world pause latencies. 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "8" - - "8" - - "8" - - "2" - - "0" - - "0" - count: "26" - explicitBounds: - - 6.399999999999999e-08 - - 6.399999999999999e-07 - - 7.167999999999999e-06 - - 8.191999999999999e-05 - - 0.0009175039999999999 - - 0.010485759999999998 - - 0.11744051199999998 - startTimeUnixNano: "1000000" - sum: 0.002552832 + sum: 281881 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: go_gc_pauses_seconds - - description: '[ALPHA] Number of endpoints added on each Service sync' + name: rest_client_response_size_bytes + - description: '[ALPHA] Request latency in seconds. Broken down by status code.' histogram: aggregationTemporality: 2 dataPoints: - attributes: - - key: clusterCIDR + - key: code value: - stringValue: 10.244.0.0/16 + stringValue: "201" bucketCounts: - "1" - "0" @@ -526,30 +756,37 @@ resourceMetrics: - "0" - "0" - "0" + - "0" + - "0" + - "0" count: "1" explicitBounds: + - 0.25 + - 0.5 + - 0.7 - 1 + - 1.5 + - 3 - 5 - - 25 - - 125 - - 625 + - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.007996424 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: node_ipam_controller_cidrset_allocation_tries_per_request - - description: '[ALPHA] Request latency in seconds. Broken down by status code.' 
+ name: apiserver_delegated_authn_request_duration_seconds + - description: '[ALPHA] ' histogram: aggregationTemporality: 2 dataPoints: - attributes: - - key: code + - key: status value: - stringValue: "201" + stringValue: miss bucketCounts: + - "0" - "1" - "0" - "0" @@ -559,280 +796,117 @@ resourceMetrics: - "0" - "0" - "0" + - "0" + - "0" count: "1" explicitBounds: + - 0.005 + - 0.01 + - 0.025 + - 0.05 + - 0.1 - 0.25 - 0.5 - - 0.7 - 1 - - 1.5 - - 3 + - 2.5 - 5 - 10 startTimeUnixNano: "1000000" - sum: 0.00144484 + sum: 0.008 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: apiserver_delegated_authz_request_duration_seconds - - description: '[ALPHA] How long in seconds an item stays in workqueue before being requested.' + name: authentication_token_cache_request_duration_seconds + - description: '[ALPHA] Number of endpoints removed on each Service sync' histogram: aggregationTemporality: 2 dataPoints: - - attributes: - - key: name - value: - stringValue: ClusterRoleAggregator - bucketCounts: - - "0" + - bucketCounts: + - "21" - "0" - "0" - - "20" - - "15" - - "18" - - "7" - - "3" - "0" - "0" - "0" - count: "63" - explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 0.1861759609999999 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: DynamicCABundle-client-ca-bundle - bucketCounts: - "0" - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - "0" - "0" - "0" - count: "1" + count: "21" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 + - 32768 startTimeUnixNano: "1000000" - sum: 4.9712e-05 + sum: 0 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: 
DynamicCABundle-csr-controller - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "4" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "4" - explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 0.000198488 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: DynamicCABundle-request-header - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "1" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "1" - explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 6.9729e-05 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: DynamicServingCertificateController - bucketCounts: - - "0" - - "0" - - "0" - - "2" - - "1" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "3" - explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 5.6285e-05 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: bootstrap_signer_queue - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "1" - - "0" - - "0" - - "1" - - "0" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: endpoint_slice_controller_endpoints_removed_per_sync + - description: '[ALPHA] Duration in seconds for NodeController to update the health of a single node.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "30" - "0" - count: "2" - explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 0.30094294 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: certificate - bucketCounts: - "0" - "0" - "0" - - "18" - "0" - - "2" - - "6" - - "4" - "0" - "0" - "0" count: "30" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 0.15747214899999992 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: claims - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 0.004 + - 0.016 + - 0.064 + - 0.256 + - 1.024 + - 4.096 + - 16.384 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.002264703 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: node_collector_update_node_health_duration_seconds + - description: '[ALPHA] Authentication duration in seconds broken out by result.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: name + - key: result value: - stringValue: cronjob + stringValue: success bucketCounts: + - "16" - "0" - "0" - "0" @@ -844,117 +918,41 @@ resourceMetrics: - "0" - "0" - "0" - explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: daemonset - bucketCounts: - - "0" - - "0" - "0" - - "7" - - "1" - - "1" - - "2" - - "5" - "0" - "0" - "0" count: "16" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 0.11296595100000001 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: deployment - bucketCounts: - - "0" - - "0" - - "0" - - "21" - - "1" - - "6" - - "5" - - "6" - - "0" - - "0" - - "0" - count: "39" - explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 0.156384502 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: disruption - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: disruption_recheck - bucketCounts: + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + - 16.384 + startTimeUnixNano: "1000000" + sum: 0.000421186 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: 
histogram + name: authentication_duration_seconds + - description: '[ALPHA] Number of endpoints added on each Service sync' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "21" - "0" - "0" - "0" @@ -966,118 +964,155 @@ resourceMetrics: - "0" - "0" - "0" - explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: endpoint - bucketCounts: - "0" - "0" - "0" - - "13" - "0" + count: "21" + explicitBounds: + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 + - 32768 + startTimeUnixNano: "1000000" + sum: 4 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: endpoint_slice_controller_endpoints_added_per_sync + - description: Distribution of heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "19958" + - "288838" + - "168349" + - "114401" + - "72977" + - "15073" + - "4815" + - "2139" + - "2577" + - "545" + - "288" + - "188" + count: "690148" + explicitBounds: + - 8.999999999999998 + - 24.999999999999996 + - 64.99999999999999 + - 144.99999999999997 + - 320.99999999999994 + - 704.9999999999999 + - 1536.9999999999998 + - 3200.9999999999995 + - 6528.999999999999 + - 13568.999999999998 + - 27264.999999999996 + startTimeUnixNano: "1000000" + sum: 9.4643352e+07 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: go_gc_heap_allocs_by_size_bytes + - description: Distribution individual GC-related stop-the-world pause latencies. 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: - "0" - "0" - - "2" + - "5" + - "16" + - "9" - "0" - "0" - "0" - count: "15" + count: "30" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 6.399999999999999e-08 + - 6.399999999999999e-07 + - 7.167999999999999e-06 + - 8.191999999999999e-05 + - 0.0009175039999999999 + - 0.010485759999999998 + - 0.11744051199999998 startTimeUnixNano: "1000000" - sum: 0.16685807699999997 + sum: 0.000855168 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: endpoint_slice - bucketCounts: - - "0" - - "0" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: go_gc_pauses_seconds + - description: '[ALPHA] Latencies in seconds of data encryption key(DEK) generation operations.' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: - "0" - - "16" - "0" - - "1" - "0" - - "2" - "0" - "0" - "0" - count: "19" - explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 0.07041071600000001 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: endpoint_slice_mirroring - bucketCounts: - "0" - "0" - "0" - - "10" - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - count: "11" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 5e-06 + - 1e-05 + - 2e-05 + - 4e-05 + - 8e-05 + - 0.00016 + - 0.00032 + - 0.00064 + - 0.00128 + - 0.00256 + - 0.00512 + - 0.01024 + - 0.02048 + - 0.04096 startTimeUnixNano: "1000000" - sum: 0.06454258399999999 + sum: 0 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: ephemeral_volume - bucketCounts: + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: 
apiserver_storage_data_key_generation_duration_seconds + - description: '[ALPHA] Duration of syncEndpoints() in seconds' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "11" + - "0" + - "0" + - "0" - "0" - "0" - "0" @@ -1089,25 +1124,39 @@ resourceMetrics: - "0" - "0" - "0" + - "0" + count: "11" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + - 16.384 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: garbage_collector_attempt_to_delete - bucketCounts: + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: endpoint_slice_mirroring_controller_endpoints_sync_duration + - description: '[ALPHA] CEL compilation time in seconds.' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" - "0" - "0" - "0" @@ -1116,120 +1165,155 @@ resourceMetrics: - "0" - "0" - "0" - - "2" - "0" - "0" - count: "2" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 + - 0.005 - 0.01 + - 0.025 + - 0.05 - 0.1 + - 0.25 + - 0.5 - 1 + - 2.5 + - 5 - 10 startTimeUnixNano: "1000000" - sum: 1.2764251309999999 + sum: 0 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: apiserver_cel_compilation_duration_seconds + - description: '[ALPHA] Number of endpoints added on each Service sync' + histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: name + - key: clusterCIDR value: - stringValue: garbage_collector_attempt_to_orphan + stringValue: 10.244.0.0/16 bucketCounts: + - "1" - "0" - "0" - "0" - "0" - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" + count: "1" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 
9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - 1 - - 10 + - 5 + - 25 + - 125 + - 625 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: garbage_collector_graph_changes - bucketCounts: + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: node_ipam_controller_cidrset_allocation_tries_per_request + - description: '[ALPHA] CEL evaluation time in seconds.' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" - "0" - "0" - - "17" - - "469" - - "152" - - "9" - "0" - "0" - "0" - "0" - "0" - count: "647" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 + - 0.005 - 0.01 + - 0.025 + - 0.05 - 0.1 + - 0.25 + - 0.5 - 1 + - 2.5 + - 5 - 10 startTimeUnixNano: "1000000" - sum: 0.007960224 + sum: 0 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: apiserver_cel_evaluation_duration_seconds + - description: '[ALPHA] Number of EndpointSlices changed on each Service sync' + histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: name + - key: topology value: - stringValue: horizontalpodautoscaler + stringValue: Disabled bucketCounts: + - "11" - "0" - "0" - "0" - "0" - "0" - "0" + - "10" - "0" - "0" - "0" - "0" - - "0" + count: "21" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 + - 0.005 - 0.01 + - 0.025 + - 0.05 - 0.1 + - 0.25 + - 0.5 - 1 + - 2.5 + - 5 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 10 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: endpoint_slice_controller_endpointslices_changed_per_sync + - description: '[ALPHA] Client side rate limiter latency in seconds. Broken down by verb, and host.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: name + - key: host value: - stringValue: job + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: GET bucketCounts: + - "199" + - "1" + - "41" + - "6" + - "9" + - "2" - "0" - "0" - "0" @@ -1237,29 +1321,33 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "0" - - "0" - - "0" + count: "258" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 7.688760803000003 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: job_orphan_pod + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PATCH bucketCounts: + - "14" + - "0" - "0" - "0" - "0" @@ -1271,26 +1359,36 @@ resourceMetrics: - "0" - "0" - "0" + count: "14" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 2.1011e-05 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: namespace + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: POST bucketCounts: + - "78" - "0" + - "18" + - "1" + - "8" - "0" - "0" - "0" @@ -1299,27 +1397,32 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "0" + count: "105" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 4.293161147 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: node + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PUT 
bucketCounts: + - "152" - "0" - "0" - "0" @@ -1331,37 +1434,50 @@ resourceMetrics: - "0" - "0" - "0" + - "0" + count: "152" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.00027027299999999997 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: rest_client_rate_limiter_duration_seconds + - description: '[ALPHA] How long in seconds an item stays in workqueue before being requested.' + histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - key: name value: - stringValue: node_lifecycle_controller + stringValue: ClusterRoleAggregator bucketCounts: - "0" - "0" - "0" - - "1" - - "3" - - "1" - - "0" - - "1" + - "14" + - "21" + - "16" + - "7" + - "4" - "0" - "0" - "0" - count: "6" + count: "62" explicitBounds: - 1e-08 - 1e-07 @@ -1374,25 +1490,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.084545424 + sum: 0.15461456299999995 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: node_lifecycle_controller_pods + stringValue: DynamicCABundle-client-ca-bundle bucketCounts: - "0" - "0" - "0" + - "1" + - "1" + - "0" - "0" - - "7" - - "2" - "0" - - "4" - "0" - "0" - "0" - count: "13" + count: "2" explicitBounds: - 1e-08 - 1e-07 @@ -1405,25 +1521,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.324419032 + sum: 8.5289e-05 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: noexec_taint_node + stringValue: DynamicCABundle-csr-controller bucketCounts: - "0" - "0" - "0" - "0" + - "4" + - "1" - "0" - "0" - "0" - - "1" - - "0" - "0" - "0" - count: "1" + count: "5" explicitBounds: - 1e-08 - 1e-07 @@ -1436,25 +1552,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.084637308 + sum: 0.000888186 timeUnixNano: "1000000" 
- attributes: - key: name value: - stringValue: noexec_taint_pod + stringValue: DynamicCABundle-request-header bucketCounts: - "0" - "0" - "0" - - "18" + - "1" - "0" + - "1" - "0" - "0" - - "4" - "0" - "0" - "0" - count: "22" + count: "2" explicitBounds: - 1e-08 - 1e-07 @@ -1467,24 +1583,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.3259026510000001 + sum: 0.00013377 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: orphaned_pods_nodes + stringValue: DynamicServingCertificateController bucketCounts: - "0" - "0" - "0" + - "1" + - "2" - "0" - "0" - "0" - "0" - "0" - "0" - - "0" - - "0" + count: "3" explicitBounds: - 1e-08 - 1e-07 @@ -1497,24 +1614,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 4.2772e-05 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: pvcprotection + stringValue: bootstrap_signer_queue bucketCounts: - "0" - "0" - "0" + - "1" - "0" - "0" - "0" - "0" - "0" - "0" - - "0" - - "0" + - "1" + count: "2" explicitBounds: - 1e-08 - 1e-07 @@ -1527,24 +1645,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 10.600946147 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: pvcs + stringValue: certificate bucketCounts: - "0" - "0" - "0" + - "18" - "0" + - "2" + - "2" + - "8" - "0" - "0" - "0" - - "0" - - "0" - - "0" - - "0" + count: "30" explicitBounds: - 1e-08 - 1e-07 @@ -1557,12 +1676,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.296560614 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: pvprotection + stringValue: claims bucketCounts: - "0" - "0" @@ -1592,20 +1711,19 @@ resourceMetrics: - attributes: - key: name value: - stringValue: replicaset + stringValue: cronjob bucketCounts: - "0" - "0" - "0" - - "21" - - "4" - - "4" - - "7" - - "5" - "0" - "0" - "0" - count: "41" + - "0" + - "0" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -1618,24 +1736,25 @@ 
resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.15150380200000002 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: replicationmanager + stringValue: daemonset bucketCounts: - "0" - "0" - "0" + - "10" + - "1" + - "1" - "0" + - "5" - "0" - "0" - "0" - - "0" - - "0" - - "0" - - "0" + count: "17" explicitBounds: - 1e-08 - 1e-07 @@ -1648,24 +1767,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.19704969800000005 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: resource_quota_controller_resource_changes + stringValue: deployment bucketCounts: - "0" - "0" - "0" + - "16" + - "3" + - "7" + - "8" + - "5" - "0" - "0" - "0" - - "0" - - "0" - - "0" - - "0" - - "0" + count: "39" explicitBounds: - 1e-08 - 1e-07 @@ -1678,12 +1798,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.17739068899999993 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: resourcequota_primary + stringValue: disruption bucketCounts: - "0" - "0" @@ -1713,7 +1833,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: resourcequota_priority + stringValue: disruption_recheck bucketCounts: - "0" - "0" @@ -1743,20 +1863,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: root_ca_cert_publisher + stringValue: endpoint bucketCounts: - "0" - "0" - "0" - - "1" + - "11" + - "2" - "0" - "0" + - "2" - "0" - - "1" - - "4" - "0" - "0" - count: "6" + count: "15" explicitBounds: - 1e-08 - 1e-07 @@ -1769,24 +1889,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 3.024412881 + sum: 0.027503861000000008 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: service + stringValue: endpoint_slice bucketCounts: - "0" - "0" - "0" + - "19" + - "1" - "0" - "0" - "0" + - "2" - "0" - "0" - - "0" - - "0" - - "0" + count: "22" explicitBounds: - 1e-08 - 1e-07 @@ -1799,25 +1920,25 @@ resourceMetrics: - 1 - 10 
startTimeUnixNano: "1000000" - sum: 0 + sum: 0.203151776 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: serviceaccount + stringValue: endpoint_slice_mirroring bucketCounts: - "0" - "0" - "0" - - "1" - - "0" + - "9" - "0" + - "1" - "0" - "1" - - "4" - "0" - "0" - count: "6" + - "0" + count: "11" explicitBounds: - 1e-08 - 1e-07 @@ -1830,12 +1951,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 2.813764993 + sum: 0.081444424 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: serviceaccount_tokens_secret + stringValue: ephemeral_volume bucketCounts: - "0" - "0" @@ -1865,20 +1986,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: serviceaccount_tokens_service + stringValue: garbage_collector_attempt_to_delete bucketCounts: - "0" - "0" - "0" - - "36" - "0" - "0" - "0" - - "10" - "0" - "0" + - "1" - "0" - count: "46" + - "0" + count: "1" explicitBounds: - 1e-08 - 1e-07 @@ -1891,12 +2012,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.7075846650000002 + sum: 0.617893399 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: stale_pod_disruption + stringValue: garbage_collector_attempt_to_orphan bucketCounts: - "0" - "0" @@ -1926,19 +2047,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: statefulset + stringValue: garbage_collector_graph_changes bucketCounts: - "0" - "0" - "0" + - "420" + - "94" + - "163" - "0" - "0" - "0" - "0" - "0" - - "0" - - "0" - - "0" + count: "677" explicitBounds: - 1e-08 - 1e-07 @@ -1951,25 +2073,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.049375700999999966 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: token_cleaner + stringValue: horizontalpodautoscaler bucketCounts: - "0" - "0" - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - "0" - "0" - count: "1" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -1982,12 +2103,12 @@ resourceMetrics: - 1 - 10 
startTimeUnixNano: "1000000" - sum: 0.000125052 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: ttl_jobs_to_delete + stringValue: job bucketCounts: - "0" - "0" @@ -2017,20 +2138,19 @@ resourceMetrics: - attributes: - key: name value: - stringValue: ttlcontroller + stringValue: job_orphan_pod bucketCounts: - "0" - "0" - "0" - - "4" - "0" - "0" - - "1" - - "1" - - "1" - "0" - "0" - count: "7" + - "0" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2043,12 +2163,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.544666089 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: volume_expand + stringValue: namespace bucketCounts: - "0" - "0" @@ -2078,7 +2198,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: volumes + stringValue: node bucketCounts: - "0" - "0" @@ -2105,251 +2225,169 @@ resourceMetrics: startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: workqueue_queue_duration_seconds - - description: '[ALPHA] Authentication duration in seconds broken out by result.' - histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: result + - key: name value: - stringValue: success + stringValue: node_lifecycle_controller bucketCounts: - - "15" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - "0" - count: "15" - explicitBounds: - - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - - 16.384 - startTimeUnixNano: "1000000" - sum: 0.000379751 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: authentication_duration_seconds - - description: '[ALPHA] Duration in seconds for NodeController to update the health of a single node.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "26" - "0" - "0" + - "5" - "0" - "0" - "0" - "0" + - "1" - "0" - "0" - count: "26" + count: "6" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 - 0.001 - - 0.004 - - 0.016 - - 0.064 - - 0.256 - - 1.024 - - 4.096 - - 16.384 - startTimeUnixNano: "1000000" - sum: 0.0018292450000000002 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: node_collector_update_node_health_duration_seconds - - description: '[ALPHA] Response size in bytes. Broken down by verb and host.' - histogram: - aggregationTemporality: 2 - dataPoints: - - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - 0.01 + - 0.1 + - 1 + - 10 + startTimeUnixNano: "1000000" + sum: 0.23477021799999997 + timeUnixNano: "1000000" + - attributes: + - key: name value: - stringValue: GET + stringValue: node_lifecycle_controller_pods bucketCounts: - - "19" - - "71" - - "107" - - "4" - - "12" - - "31" - - "12" - "0" - "0" - "0" - "0" + - "4" + - "5" + - "0" + - "0" + - "4" + - "0" - "0" - count: "256" + count: "13" explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 630482 + sum: 0.9296804769999999 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: PATCH + stringValue: noexec_taint_node bucketCounts: - "0" - "0" - "0" - - "1" - - "15" - "0" - "0" - "0" - "0" - "0" + - "1" - "0" - "0" - count: "16" + count: "1" explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 + - 1e-08 + - 1e-07 + - 1e-06 + - 
9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 50067 + sum: 0.234827334 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: POST + stringValue: noexec_taint_pod bucketCounts: - "0" - - "38" - - "1" - - "20" - - "40" - - "2" - "0" - "0" + - "17" + - "0" + - "1" - "0" - "0" + - "4" - "0" - "0" - count: "101" + count: "22" explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 113318 + sum: 0.9277257410000002 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: PUT + stringValue: orphaned_pods_nodes bucketCounts: - "0" - - "3" - - "80" - - "7" - - "36" - - "21" - "0" - "0" - "0" - "0" - "0" - "0" - count: "147" + - "0" + - "0" + - "0" + - "0" explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 268412 + sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: rest_client_response_size_bytes - - description: '[ALPHA] The ratio of chosen deleted pod''s ages to the current youngest pod''s age (at the time). Should be <2.The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate''s effect onthe sorting (and deletion) of pods when a replicaset scales down. This only considers Ready pods when calculating and reporting.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: + - attributes: + - key: name + value: + stringValue: pvcprotection + bucketCounts: + - "0" + - "0" + - "0" + - "0" - "0" - "0" - "0" @@ -2358,30 +2396,24 @@ resourceMetrics: - "0" - "0" explicitBounds: - - 0.25 - - 0.5 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 - 1 - - 2 - - 4 - - 8 + - 10 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: replicaset_controller_sorting_deletion_age_ratio - - description: '[ALPHA] Number of endpoints added on each Service sync' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "18" - - "0" - - "0" - - "0" - - "0" + - attributes: + - key: name + value: + stringValue: pvcs + bucketCounts: - "0" - "0" - "0" @@ -2393,74 +2425,25 @@ resourceMetrics: - "0" - "0" - "0" - count: "18" - explicitBounds: - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 - - 32768 - startTimeUnixNano: "1000000" - sum: 4 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: endpoint_slice_controller_endpoints_added_per_sync - - description: Distribution of freed heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "15217" - - "255532" - - "136136" - - "98370" - - "58046" - - "11758" - - "4179" - - "1758" - - "2207" - - "392" - - "94" - - "81" - count: "583770" explicitBounds: - - 8.999999999999998 - - 24.999999999999996 - - 64.99999999999999 - - 144.99999999999997 - - 320.99999999999994 - - 704.9999999999999 - - 1536.9999999999998 - - 3200.9999999999995 - - 6528.999999999999 - - 13568.999999999998 - - 27264.999999999996 - startTimeUnixNano: "1000000" - sum: 7.2131032e+07 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: go_gc_heap_frees_by_size_bytes - - description: '[ALPHA] A metric measuring the latency for updating each load balancer hosts.' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + - attributes: + - key: name + value: + stringValue: pvprotection + bucketCounts: - "0" - "0" - "0" @@ -2472,46 +2455,88 @@ resourceMetrics: - "0" - "0" - "0" + explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + - attributes: + - key: name + value: + stringValue: replicaset + bucketCounts: + - "0" - "0" - "0" + - "27" + - "2" + - "4" + - "2" + - "5" - "0" - "0" - "0" + count: "40" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 - 1 - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 + - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.094433077 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: 
service_controller_update_loadbalancer_host_latency_seconds - - description: '[ALPHA] Latencies in seconds of data encryption key(DEK) generation operations.' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: + - attributes: + - key: name + value: + stringValue: replicationmanager + bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" - "0" - "0" - "0" - "0" - "0" - "0" + explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + - attributes: + - key: name + value: + stringValue: resource_quota_controller_resource_changes + bucketCounts: + - "0" + - "0" - "0" - "0" - "0" @@ -2522,38 +2547,24 @@ resourceMetrics: - "0" - "0" explicitBounds: - - 5e-06 - - 1e-05 - - 2e-05 - - 4e-05 - - 8e-05 - - 0.00016 - - 0.00032 - - 0.00064 - - 0.00128 - - 0.00256 - - 0.00512 - - 0.01024 - - 0.02048 - - 0.04096 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: apiserver_storage_data_key_generation_duration_seconds - - description: '[ALPHA] Request latency in seconds. Broken down by status code.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: code + - key: name value: - stringValue: "201" + stringValue: resourcequota_primary bucketCounts: - - "1" - "0" - "0" - "0" @@ -2562,45 +2573,39 @@ resourceMetrics: - "0" - "0" - "0" - count: "1" + - "0" + - "0" + - "0" explicitBounds: - - 0.25 - - 0.5 - - 0.7 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 - 1 - - 1.5 - - 3 - - 5 - 10 startTimeUnixNano: "1000000" - sum: 0.007777491 + sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: apiserver_delegated_authn_request_duration_seconds - - description: '[ALPHA] How long in seconds processing an item from workqueue takes.' - histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - key: name value: - stringValue: ClusterRoleAggregator + stringValue: resourcequota_priority bucketCounts: - "0" - "0" - "0" - "0" - "0" - - "49" - - "11" - "0" - - "3" - "0" - "0" - count: "63" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2613,25 +2618,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 2.1925661279999993 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: DynamicCABundle-client-ca-bundle + stringValue: root_ca_cert_publisher bucketCounts: - "0" - "0" - "0" - - "0" - "1" - "0" - "0" + - "1" - "0" + - "4" - "0" - "0" - - "0" - count: "1" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -2644,25 +2649,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 2.1339e-05 + sum: 1.169525497 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: DynamicCABundle-csr-controller + stringValue: service bucketCounts: - "0" - "0" - "0" - "0" - "0" - - "4" - "0" - "0" - "0" - "0" - "0" - count: "4" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2675,12 +2679,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.001783454 + sum: 0 
timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: DynamicCABundle-request-header + stringValue: serviceaccount bucketCounts: - "0" - "0" @@ -2689,11 +2693,11 @@ resourceMetrics: - "1" - "0" - "0" + - "1" + - "4" - "0" - "0" - - "0" - - "0" - count: "1" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -2706,25 +2710,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 2.1069e-05 + sum: 2.3851086200000005 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: DynamicServingCertificateController + stringValue: serviceaccount_tokens_secret bucketCounts: - "0" - "0" - "0" - "0" - - "3" - "0" - "0" - "0" - "0" - "0" - "0" - count: "3" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2737,25 +2740,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 7.4499e-05 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: bootstrap_signer_queue + stringValue: serviceaccount_tokens_service bucketCounts: - "0" - "0" - "0" - - "0" - - "0" + - "37" - "1" - "0" - "0" - - "1" + - "8" - "0" - "0" - count: "2" + - "0" + count: "46" explicitBounds: - 1e-08 - 1e-07 @@ -2768,25 +2771,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.720449021 + sum: 0.62868951 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: certificate + stringValue: stale_pod_disruption bucketCounts: - "0" - "0" - "0" - - "22" - - "3" - - "3" - "0" - - "2" - "0" - "0" - "0" - count: "30" + - "0" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2799,12 +2801,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.039120922 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: claims + stringValue: statefulset bucketCounts: - "0" - "0" @@ -2834,11 +2836,12 @@ resourceMetrics: - attributes: - key: name value: - stringValue: cronjob + stringValue: token_cleaner bucketCounts: - "0" - "0" - "0" + - "1" - "0" - "0" - "0" @@ -2846,7 +2849,7 @@ 
resourceMetrics: - "0" - "0" - "0" - - "0" + count: "1" explicitBounds: - 1e-08 - 1e-07 @@ -2859,25 +2862,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 3.186e-06 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: daemonset + stringValue: ttl_jobs_to_delete bucketCounts: - "0" - "0" - "0" - "0" - "0" - - "8" - - "3" - - "3" - - "2" - "0" - "0" - count: "16" + - "0" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -2890,25 +2892,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.2170028430000002 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: deployment + stringValue: ttlcontroller bucketCounts: - "0" - "0" - "0" + - "2" + - "3" - "0" - "0" - - "14" - - "14" - - "9" - - "2" + - "1" - "0" - "0" - count: "39" + - "0" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -2921,12 +2923,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.1502812480000002 + sum: 0.09726425699999999 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: disruption + stringValue: volume_expand bucketCounts: - "0" - "0" @@ -2956,7 +2958,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: disruption_recheck + stringValue: volumes bucketCounts: - "0" - "0" @@ -2983,54 +2985,243 @@ resourceMetrics: startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: workqueue_queue_duration_seconds + - description: '[ALPHA] Distribution of the remaining lifetime on the certificate used to authenticate a request.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: + - 0 + - 1800 + - 3600 + - 7200 + - 21600 + - 43200 + - 86400 + - 172800 + - 345600 + - 604800 + - 2.592e+06 + - 7.776e+06 + - 1.5552e+07 + - 3.1104e+07 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: apiserver_client_certificate_expiration_seconds + - description: '[ALPHA] Number of namespace syncs happened in root ca cert publisher.' + histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: name + - key: code value: - stringValue: endpoint + stringValue: "200" bucketCounts: + - "0" + - "3" + - "2" - "0" - "0" - "0" - "0" - - "5" - "0" - - "9" - "0" - "1" - "0" - "0" - count: "15" + - "0" + - "0" + - "0" + - "0" + count: "6" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - 0.001 - - 0.01 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + - 16.384 + startTimeUnixNano: "1000000" + sum: 0.28932246500000003 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: root_ca_cert_publisher_sync_duration_seconds + - description: '[ALPHA] The time it took to delete the job since it became eligible for deletion' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: - 0.1 + - 0.2 + - 0.4 + - 0.8 + - 1.6 + - 3.2 + - 6.4 + - 12.8 + - 25.6 + - 51.2 + - 102.4 + - 204.8 + - 409.6 + - 819.2 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + 
name: ttl_after_finished_controller_job_deletion_duration_seconds + - description: Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "1467" + - "746" + - "2968" + - "1301" + - "203" + - "29" + - "0" + - "0" + count: "6714" + explicitBounds: + - 6.399999999999999e-08 + - 6.399999999999999e-07 + - 7.167999999999999e-06 + - 8.191999999999999e-05 + - 0.0009175039999999999 + - 0.010485759999999998 + - 0.11744051199999998 + startTimeUnixNano: "1000000" + sum: 0.054510208000000004 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: go_sched_latencies_seconds + - description: '[ALPHA] A metric measuring the latency for updating each load balancer hosts.' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: - 1 - - 10 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 startTimeUnixNano: "1000000" - sum: 0.7820642600000002 + sum: 0 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: service_controller_update_loadbalancer_host_latency_seconds + - description: '[ALPHA] How long in seconds processing an item from workqueue takes.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - key: name value: - stringValue: endpoint_slice + stringValue: ClusterRoleAggregator bucketCounts: - "0" - "0" - "0" - "0" - - "4" - - "5" - - "9" - "0" - - "1" + - "51" + - "6" + - "2" + - "3" - "0" - "0" - count: "19" + count: "62" explicitBounds: - 1e-08 - 1e-07 @@ -3043,25 +3234,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.237821309 + sum: 2.2160274009999994 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: endpoint_slice_mirroring + stringValue: DynamicCABundle-client-ca-bundle bucketCounts: - "0" - "0" - "0" - - "1" - - "10" - "0" + - "2" - "0" - "0" - "0" - "0" - "0" - count: "11" + - "0" + count: "2" explicitBounds: - 1e-08 - 1e-07 @@ -3074,24 +3265,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000260355 + sum: 4.262e-05 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: ephemeral_volume + stringValue: DynamicCABundle-csr-controller bucketCounts: - "0" - "0" - "0" - "0" - "0" + - "5" - "0" - "0" - "0" - "0" - "0" - - "0" + count: "5" explicitBounds: - 1e-08 - 1e-07 @@ -3104,24 +3296,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.003082354 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: garbage_collector_attempt_to_delete + stringValue: DynamicCABundle-request-header bucketCounts: - "0" - "0" - "0" - "0" - "1" - - "0" - "1" - "0" - "0" - "0" - "0" + - "0" count: "2" explicitBounds: - 1e-08 @@ -3135,24 +3327,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.0027031489999999997 + sum: 0.000191649 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: garbage_collector_attempt_to_orphan + stringValue: DynamicServingCertificateController bucketCounts: - "0" - "0" - "0" - "0" + - "3" - "0" - "0" - "0" - "0" - "0" - "0" - - "0" + count: "3" explicitBounds: - 1e-08 - 1e-07 @@ -3165,25 +3358,25 @@ resourceMetrics: - 
1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 9.6857e-05 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: garbage_collector_graph_changes + stringValue: bootstrap_signer_queue bucketCounts: - "0" - "0" - - "2" - - "561" - - "84" - "0" - "0" + - "1" + - "0" - "0" - "0" + - "1" - "0" - "0" - count: "647" + count: "2" explicitBounds: - 1e-08 - 1e-07 @@ -3196,24 +3389,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.00424178 + sum: 0.607599363 timeUnixNano: "1000000" - attributes: - - key: name - value: - stringValue: horizontalpodautoscaler - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" + - key: name + value: + stringValue: certificate + bucketCounts: - "0" - "0" - "0" + - "19" + - "6" + - "3" + - "1" + - "1" - "0" - "0" - "0" + count: "30" explicitBounds: - 1e-08 - 1e-07 @@ -3226,12 +3420,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.023372524 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: job + stringValue: claims bucketCounts: - "0" - "0" @@ -3261,7 +3455,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: job_orphan_pod + stringValue: cronjob bucketCounts: - "0" - "0" @@ -3291,19 +3485,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: namespace + stringValue: daemonset bucketCounts: - "0" - "0" - "0" - "0" - "0" + - "9" + - "4" + - "2" + - "2" - "0" - "0" - - "0" - - "0" - - "0" - - "0" + count: "17" explicitBounds: - 1e-08 - 1e-07 @@ -3316,24 +3511,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 1.506415865 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: node + stringValue: deployment bucketCounts: - "0" - "0" - "0" - "0" - "0" + - "17" + - "15" + - "5" + - "2" - "0" - "0" - - "0" - - "0" - - "0" - - "0" + count: "39" explicitBounds: - 1e-08 - 1e-07 @@ -3346,25 +3542,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 1.529207536 
timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: node_lifecycle_controller + stringValue: disruption bucketCounts: - "0" - "0" - "0" - - "2" - - "3" - "0" - - "1" - "0" - "0" - "0" - "0" - count: "6" + - "0" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -3377,25 +3572,24 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.005635317999999999 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: node_lifecycle_controller_pods + stringValue: disruption_recheck bucketCounts: - "0" - "0" - "0" - - "2" - - "11" - "0" - "0" - "0" - "0" - "0" - "0" - count: "13" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -3408,25 +3602,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000421109 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: noexec_taint_node + stringValue: endpoint bucketCounts: - "0" - "0" - "0" - "0" - - "1" - - "0" - - "0" + - "5" - "0" + - "9" - "0" + - "1" - "0" - "0" - count: "1" + count: "15" explicitBounds: - 1e-08 - 1e-07 @@ -3439,22 +3633,22 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 6.999e-05 + sum: 0.4078671729999999 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: noexec_taint_pod + stringValue: endpoint_slice bucketCounts: - "0" - "0" - "0" - - "17" - - "5" - - "0" - - "0" - "0" + - "5" + - "7" + - "9" - "0" + - "1" - "0" - "0" count: "22" @@ -3470,24 +3664,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.00030656600000000003 + sum: 0.823891467 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: orphaned_pods_nodes + stringValue: endpoint_slice_mirroring bucketCounts: - "0" - "0" - "0" - "0" + - "11" - "0" - "0" - "0" - "0" - "0" - "0" - - "0" + count: "11" explicitBounds: - 1e-08 - 1e-07 @@ -3500,12 +3695,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.00023125200000000001 timeUnixNano: "1000000" - attributes: - 
key: name value: - stringValue: pvcprotection + stringValue: ephemeral_volume bucketCounts: - "0" - "0" @@ -3535,11 +3730,12 @@ resourceMetrics: - attributes: - key: name value: - stringValue: pvcs + stringValue: garbage_collector_attempt_to_delete bucketCounts: - "0" - "0" - "0" + - "1" - "0" - "0" - "0" @@ -3547,7 +3743,7 @@ resourceMetrics: - "0" - "0" - "0" - - "0" + count: "1" explicitBounds: - 1e-08 - 1e-07 @@ -3560,12 +3756,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 7.925e-06 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: pvprotection + stringValue: garbage_collector_attempt_to_orphan bucketCounts: - "0" - "0" @@ -3595,20 +3791,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: replicaset + stringValue: garbage_collector_graph_changes bucketCounts: - "0" - "0" - "0" + - "599" + - "73" + - "5" - "0" - - "11" - - "11" - - "8" - - "9" - - "2" - "0" - "0" - count: "41" + - "0" + - "0" + count: "677" explicitBounds: - 1e-08 - 1e-07 @@ -3621,12 +3817,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.2286506730000004 + sum: 0.005562633000000005 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: replicationmanager + stringValue: horizontalpodautoscaler bucketCounts: - "0" - "0" @@ -3656,7 +3852,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: resource_quota_controller_resource_changes + stringValue: job bucketCounts: - "0" - "0" @@ -3686,7 +3882,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: resourcequota_primary + stringValue: job_orphan_pod bucketCounts: - "0" - "0" @@ -3716,7 +3912,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: resourcequota_priority + stringValue: namespace bucketCounts: - "0" - "0" @@ -3746,7 +3942,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: root_ca_cert_publisher + stringValue: node bucketCounts: - "0" - "0" @@ -3754,12 +3950,11 @@ 
resourceMetrics: - "0" - "0" - "0" - - "5" - "0" - - "1" - "0" - "0" - count: "6" + - "0" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -3772,24 +3967,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.689833046 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: service + stringValue: node_lifecycle_controller bucketCounts: - "0" - "0" - "0" + - "4" - "0" + - "1" - "0" + - "1" - "0" - "0" - "0" - - "0" - - "0" - - "0" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -3802,25 +3998,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.010733758 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: serviceaccount + stringValue: node_lifecycle_controller_pods bucketCounts: - "0" - "0" - "0" - "0" - - "0" - - "0" - - "5" - - "0" + - "8" - "1" + - "2" + - "2" - "0" - "0" - count: "6" + - "0" + count: "13" explicitBounds: - 1e-08 - 1e-07 @@ -3833,24 +4029,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.6498003660000001 + sum: 0.03915494300000001 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: serviceaccount_tokens_secret + stringValue: noexec_taint_node bucketCounts: - "0" - "0" - "0" - "0" + - "1" - "0" - "0" - "0" - "0" - "0" - "0" - - "0" + count: "1" explicitBounds: - 1e-08 - 1e-07 @@ -3863,25 +4060,25 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 6.5383e-05 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: serviceaccount_tokens_service + stringValue: noexec_taint_pod bucketCounts: - "0" - "0" - "0" - - "43" - - "3" + - "17" + - "5" - "0" - "0" - "0" - "0" - "0" - "0" - count: "46" + count: "22" explicitBounds: - 1e-08 - 1e-07 @@ -3894,12 +4091,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.00032618900000000006 + sum: 0.0003418389999999999 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: stale_pod_disruption + stringValue: 
orphaned_pods_nodes bucketCounts: - "0" - "0" @@ -3929,7 +4126,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: statefulset + stringValue: pvcprotection bucketCounts: - "0" - "0" @@ -3959,20 +4156,19 @@ resourceMetrics: - attributes: - key: name value: - stringValue: token_cleaner + stringValue: pvcs bucketCounts: - "0" - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - "0" - "0" - "0" - count: "1" + - "0" explicitBounds: - 1e-08 - 1e-07 @@ -3985,12 +4181,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.2023e-05 + sum: 0 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: ttl_jobs_to_delete + stringValue: pvprotection bucketCounts: - "0" - "0" @@ -4020,20 +4216,20 @@ resourceMetrics: - attributes: - key: name value: - stringValue: ttlcontroller + stringValue: replicaset bucketCounts: - "0" - "0" - "0" - - "5" - - "0" - - "0" - - "1" - "0" - - "1" + - "9" + - "14" + - "9" + - "6" + - "2" - "0" - "0" - count: "7" + count: "40" explicitBounds: - 1e-08 - 1e-07 @@ -4046,12 +4242,12 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.521166584 + sum: 0.776397369 timeUnixNano: "1000000" - attributes: - key: name value: - stringValue: volume_expand + stringValue: replicationmanager bucketCounts: - "0" - "0" @@ -4081,7 +4277,7 @@ resourceMetrics: - attributes: - key: name value: - stringValue: volumes + stringValue: resource_quota_controller_resource_changes bucketCounts: - "0" - "0" @@ -4108,58 +4304,41 @@ resourceMetrics: startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: workqueue_work_duration_seconds - - description: '[ALPHA] Number of EndpointSlices changed on each Service sync' - histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: topology + - key: name value: - stringValue: Disabled + stringValue: resourcequota_primary bucketCounts: - - "8" - "0" - "0" - "0" - "0" - "0" - "0" - - "10" 
- "0" - "0" - "0" - "0" - count: "18" + - "0" explicitBounds: - - 0.005 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 - 0.01 - - 0.025 - - 0.05 - 0.1 - - 0.25 - - 0.5 - 1 - - 2.5 - - 5 - 10 startTimeUnixNano: "1000000" - sum: 10 + sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: endpoint_slice_controller_endpointslices_changed_per_sync - - description: '[STABLE] Time between when a cronjob is scheduled to be run, and when the corresponding job is created' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: + - attributes: + - key: name + value: + stringValue: resourcequota_priority + bucketCounts: - "0" - "0" - "0" @@ -4172,76 +4351,55 @@ resourceMetrics: - "0" - "0" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 - 1 - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: cronjob_controller_job_creation_skew_duration_seconds - - description: '[ALPHA] ' - histogram: - aggregationTemporality: 2 - dataPoints: + - 10 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" - attributes: - - key: status + - key: name value: - stringValue: miss + stringValue: root_ca_cert_publisher bucketCounts: - - "0" - - "1" - - "0" - "0" - "0" - "0" - "0" - "0" - "0" + - "5" - "0" + - "1" - "0" - "0" - count: "1" + count: "6" explicitBounds: - - 0.005 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 - 0.01 - - 0.025 - - 0.05 - 0.1 - - 0.25 - - 0.5 - 1 - - 2.5 - - 5 - 10 startTimeUnixNano: "1000000" - sum: 0.007 + sum: 0.2893691909999999 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: 
authentication_token_cache_request_duration_seconds - - description: '[ALPHA] Duration of syncEndpoints() in seconds' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "11" - - "0" - - "0" - - "0" - - "0" + - attributes: + - key: name + value: + stringValue: service + bucketCounts: - "0" - "0" - "0" @@ -4253,87 +4411,56 @@ resourceMetrics: - "0" - "0" - "0" - count: "11" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - - 16.384 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: endpoint_slice_mirroring_controller_endpoints_sync_duration - - description: '[ALPHA] Number of namespace syncs happened in root ca cert publisher.' - histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: code + - key: name value: - stringValue: "200" + stringValue: serviceaccount bucketCounts: - - "0" - - "3" - - "2" - "0" - "0" - "0" - "0" - "0" - "0" + - "5" - "0" - "1" - "0" - "0" - - "0" - - "0" - - "0" count: "6" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - - 16.384 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 0.689789036 + sum: 0.569136966 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: root_ca_cert_publisher_sync_duration_seconds - - description: '[ALPHA] CEL compilation time in seconds.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" + - attributes: + - key: name + value: + stringValue: serviceaccount_tokens_secret + bucketCounts: - "0" - "0" - "0" @@ -4346,70 +4473,55 @@ resourceMetrics: - "0" - "0" explicitBounds: - - 0.005 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 - 0.01 - - 0.025 - - 0.05 - 0.1 - - 0.25 - - 0.5 - 1 - - 2.5 - - 5 - 10 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: apiserver_cel_compilation_duration_seconds - - description: '[ALPHA] Duration in seconds for NodeController to update the health of all nodes.' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "26" + - attributes: + - key: name + value: + stringValue: serviceaccount_tokens_service + bucketCounts: + - "0" - "0" - "0" + - "40" + - "6" - "0" - "0" - "0" - "0" - "0" - "0" - count: "26" + count: "46" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 - 0.01 - - 0.04 - - 0.16 - - 0.64 - - 2.56 - - 10.24 - - 40.96 - - 163.84 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 0.0032119060000000005 + sum: 0.00042148 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: node_collector_update_all_nodes_health_duration_seconds - - description: '[ALPHA] Authorization duration in seconds broken out by result.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: result + - key: name value: - stringValue: allowed + stringValue: stale_pod_disruption bucketCounts: - - "15" - - "0" - - "0" - - "0" - "0" - "0" - "0" @@ -4421,76 +4533,25 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - count: "15" explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - - 16.384 - startTimeUnixNano: "1000000" - sum: 6.2065e-05 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: authorization_duration_seconds - - description: Distribution of heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "19877" - - "284990" - - "165772" - - "112327" - - "65353" - - "14769" - - "4759" - - "2124" - - "2356" - - "520" - - "289" - - "185" - count: "673321" - explicitBounds: - - 8.999999999999998 - - 24.999999999999996 - - 64.99999999999999 - - 144.99999999999997 - - 320.99999999999994 - - 704.9999999999999 - - 1536.9999999999998 - - 3200.9999999999995 - - 6528.999999999999 - - 13568.999999999998 - - 27264.999999999996 - startTimeUnixNano: "1000000" - sum: 9.0568496e+07 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: go_gc_heap_allocs_by_size_bytes - - description: '[ALPHA] CEL evaluation time in seconds.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" + - 0.01 + - 0.1 + - 1 + - 10 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + - attributes: + - key: name + value: + stringValue: statefulset + bucketCounts: - "0" - "0" - "0" @@ -4503,77 +4564,55 @@ resourceMetrics: - "0" - "0" explicitBounds: - - 0.005 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 - 0.01 - - 0.025 - - 0.05 - 0.1 - - 0.25 - - 0.5 - 1 - - 2.5 - - 5 - 10 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: apiserver_cel_evaluation_duration_seconds - - description: '[ALPHA] Client side rate limiter latency in seconds. Broken down by verb, and host.' - histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: GET + stringValue: token_cleaner bucketCounts: - - "200" - "0" - - "39" - - "5" - - "10" - - "2" - "0" - "0" - "0" + - "1" + - "0" + - "0" - "0" - "0" - "0" - "0" - count: "256" + count: "1" explicitBounds: - - 0.005 - - 0.025 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.25 - - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 10 startTimeUnixNano: "1000000" - sum: 7.4734964810000015 + sum: 1.9486e-05 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: PATCH + stringValue: ttl_jobs_to_delete bucketCounts: - - "16" - - "0" - "0" - "0" - "0" @@ -4585,71 +4624,56 @@ resourceMetrics: - "0" - "0" - "0" - count: "16" explicitBounds: - - 0.005 - - 0.025 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.25 - - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 10 startTimeUnixNano: "1000000" - 
sum: 2.2611e-05 + sum: 0 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: POST + stringValue: ttlcontroller bucketCounts: - - "72" - - "0" - - "20" - - "1" - - "8" - "0" - "0" - "0" + - "2" + - "3" - "0" - "0" - "0" + - "1" - "0" - "0" - count: "101" + count: "6" explicitBounds: - - 0.005 - - 0.025 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.25 - - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 10 startTimeUnixNano: "1000000" - sum: 4.284271337000001 + sum: 0.739651219 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: PUT + stringValue: volume_expand bucketCounts: - - "147" - - "0" - "0" - "0" - "0" @@ -4661,38 +4685,25 @@ resourceMetrics: - "0" - "0" - "0" - count: "147" explicitBounds: - - 0.005 - - 0.025 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 - 0.1 - - 0.25 - - 0.5 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 10 startTimeUnixNano: "1000000" - sum: 0.00017979100000000003 + sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: rest_client_rate_limiter_duration_seconds - - description: '[ALPHA] Number of endpoints removed on each Service sync' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "18" - - "0" - - "0" - - "0" - - "0" + - attributes: + - key: name + value: + stringValue: volumes + bucketCounts: - "0" - "0" - "0" @@ -4704,23 +4715,17 @@ resourceMetrics: - "0" - "0" - "0" - count: "18" explicitBounds: - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 - - 32768 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: 
"1000000" sum: 0 timeUnixNano: "1000000" @@ -4728,8 +4733,8 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: endpoint_slice_controller_endpoints_removed_per_sync - - description: '[ALPHA] Distribution of the remaining lifetime on the certificate used to authenticate a request.' + name: workqueue_work_duration_seconds + - description: '[ALPHA] Histogram of the number of seconds the last auth exec plugin client certificate lived before being rotated. If auth exec plugin client certificates are unused, histogram will contain no data.' histogram: aggregationTemporality: 2 dataPoints: @@ -4746,24 +4751,18 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "0" - - "0" explicitBounds: - - 0 + - 600 - 1800 - 3600 - - 7200 - - 21600 - - 43200 + - 14400 - 86400 - - 172800 - - 345600 - 604800 - 2.592e+06 - 7.776e+06 - 1.5552e+07 - 3.1104e+07 + - 1.24416e+08 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -4771,44 +4770,45 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: apiserver_client_certificate_expiration_seconds - - description: '[ALPHA] Histogram of the number of seconds the last auth exec plugin client certificate lived before being rotated. If auth exec plugin client certificates are unused, histogram will contain no data.' + name: rest_client_exec_plugin_certificate_rotation_age + - description: Distribution of freed heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" + - "15296" + - "258183" + - "137653" + - "99329" + - "65527" + - "12109" + - "4236" + - "1776" + - "2433" + - "416" + - "93" + - "84" + count: "597135" explicitBounds: - - 600 - - 1800 - - 3600 - - 14400 - - 86400 - - 604800 - - 2.592e+06 - - 7.776e+06 - - 1.5552e+07 - - 3.1104e+07 - - 1.24416e+08 + - 8.999999999999998 + - 24.999999999999996 + - 64.99999999999999 + - 144.99999999999997 + - 320.99999999999994 + - 704.9999999999999 + - 1536.9999999999998 + - 3200.9999999999995 + - 6528.999999999999 + - 13568.999999999998 + - 27264.999999999996 startTimeUnixNano: "1000000" - sum: 0 + sum: 7.6026192e+07 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: rest_client_exec_plugin_certificate_rotation_age + name: go_gc_heap_frees_by_size_bytes scope: - name: otelcol/prometheusreceiver - version: v0.105.0 + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver + version: v0.110.0 diff --git a/functional_tests/testdata_histogram/expected/v1.28/coredns_metrics.yaml b/functional_tests/testdata_histogram/expected/v1.28/coredns_metrics.yaml index 914c38019..4d1648d09 100644 --- a/functional_tests/testdata_histogram/expected/v1.28/coredns_metrics.yaml +++ b/functional_tests/testdata_histogram/expected/v1.28/coredns_metrics.yaml @@ -18,13 +18,13 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-5dd5756b68-brndm + stringValue: coredns-5dd5756b68-msjcx - key: k8s.pod.uid value: - stringValue: 031fe94b-2926-4e4d-84a3-2c0a3568f9a9 + stringValue: c817b371-bccf-4deb-b2ff-f6878ee05e24 - key: net.host.name value: - stringValue: 10.244.0.4 + stringValue: 10.244.0.2 - key: net.host.port value: stringValue: "9153" @@ -33,13 +33,13 @@ resourceMetrics: stringValue: linux - key: server.address value: - 
stringValue: 10.244.0.4 + stringValue: 10.244.0.2 - key: server.port value: stringValue: "9153" - key: service.instance.id value: - stringValue: 10.244.0.4:9153 + stringValue: 10.244.0.2:9153 - key: service.name value: stringValue: coredns @@ -49,62 +49,7 @@ resourceMetrics: schemaUrl: https://opentelemetry.io/schemas/1.6.1 scopeMetrics: - metrics: - - description: Histogram of the time (in seconds) each request took per zone. - histogram: - aggregationTemporality: 2 - dataPoints: - - attributes: - - key: server - value: - stringValue: dns://:53 - - key: zone - value: - stringValue: . - bucketCounts: - - "3" - - "0" - - "0" - - "0" - - "0" - - "2" - - "2" - - "1" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "8" - explicitBounds: - - 0.00025 - - 0.0005 - - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - startTimeUnixNano: "1000000" - sum: 0.054349755 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: coredns_dns_request_duration_seconds - - description: Size of the EDNS0 UDP buffer in bytes (64K for TCP) per zone and protocol. + - description: Size of the returned response in bytes. histogram: aggregationTemporality: 2 dataPoints: @@ -120,8 +65,8 @@ resourceMetrics: stringValue: . bucketCounts: - "0" - - "5" - - "3" + - "0" + - "4" - "0" - "0" - "0" @@ -134,7 +79,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "8" + count: "4" explicitBounds: - 0 - 100 @@ -151,81 +96,61 @@ resourceMetrics: - 48000 - 64000 startTimeUnixNano: "1000000" - sum: 641 + sum: 598 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: coredns_dns_request_size_bytes - - description: Histogram of the time each request took. + name: coredns_dns_response_size_bytes + - description: Histogram of the time (in seconds) each request took. 
histogram: aggregationTemporality: 2 dataPoints: - - attributes: - - key: rcode - value: - stringValue: NOERROR - - key: to - value: - stringValue: 172.18.0.1:53 - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "1" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" + - bucketCounts: + - "6" + - "106" + - "2" - "0" - "0" - "0" - count: "1" + count: "114" explicitBounds: - 0.00025 - - 0.0005 - - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 + - 0.0025 + - 0.025 + - 0.25 + - 2.5 startTimeUnixNano: "1000000" - sum: 0.024471975 + sum: 0.05046487300000003 timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: coredns_health_request_duration_seconds + - description: Histogram of the time (in seconds) each request took per zone. + histogram: + aggregationTemporality: 2 + dataPoints: - attributes: - - key: rcode + - key: server value: - stringValue: NXDOMAIN - - key: to + stringValue: dns://:53 + - key: zone value: - stringValue: 172.18.0.1:53 + stringValue: . bucketCounts: + - "3" - "0" - "0" - "0" - "0" - "0" - - "2" - - "2" - "0" - "0" - "0" + - "1" - "0" - "0" - "0" @@ -252,14 +177,14 @@ resourceMetrics: - 4.096 - 8.192 startTimeUnixNano: "1000000" - sum: 0.028992529 + sum: 0.06858326199999999 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: coredns_forward_request_duration_seconds - - description: Size of the returned response in bytes. + name: coredns_dns_request_duration_seconds + - description: Size of the EDNS0 UDP buffer in bytes (64K for TCP) per zone and protocol. histogram: aggregationTemporality: 2 dataPoints: @@ -275,9 +200,8 @@ resourceMetrics: stringValue: . 
bucketCounts: - "0" + - "4" - "0" - - "5" - - "3" - "0" - "0" - "0" @@ -289,7 +213,8 @@ resourceMetrics: - "0" - "0" - "0" - count: "8" + - "0" + count: "4" explicitBounds: - 0 - 100 @@ -306,39 +231,68 @@ resourceMetrics: - 48000 - 64000 startTimeUnixNano: "1000000" - sum: 1344 + sum: 277 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: coredns_dns_response_size_bytes - - description: Histogram of the time (in seconds) each request took. + name: coredns_dns_request_size_bytes + - description: Histogram of the time each request took. histogram: aggregationTemporality: 2 dataPoints: - - bucketCounts: - - "4" - - "122" + - attributes: + - key: rcode + value: + stringValue: NXDOMAIN + - key: to + value: + stringValue: 172.18.0.1:53 + bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" - "1" - "0" - "0" - "0" - count: "127" + - "0" + - "0" + - "0" + - "0" + count: "1" explicitBounds: - 0.00025 - - 0.0025 - - 0.025 - - 0.25 - - 2.5 + - 0.0005 + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 startTimeUnixNano: "1000000" - sum: 0.04600371200000002 + sum: 0.068129575 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: coredns_health_request_duration_seconds + name: coredns_forward_request_duration_seconds scope: - name: otelcol/prometheusreceiver - version: v0.105.0 + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver + version: v0.110.0 diff --git a/functional_tests/testdata_histogram/expected/v1.29/controller_manager_metrics.yaml b/functional_tests/testdata_histogram/expected/v1.29/controller_manager_metrics.yaml index cfce82e22..cf6457736 100644 --- a/functional_tests/testdata_histogram/expected/v1.29/controller_manager_metrics.yaml +++ b/functional_tests/testdata_histogram/expected/v1.29/controller_manager_metrics.yaml 
@@ -4882,5 +4882,5 @@ resourceMetrics: stringValue: histogram name: node_ipam_controller_cidrset_allocation_tries_per_request scope: - name: otelcol/prometheusreceiver - version: v0.105.0 + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver + version: v0.110.0 diff --git a/functional_tests/testdata_histogram/expected/v1.29/coredns_metrics.yaml b/functional_tests/testdata_histogram/expected/v1.29/coredns_metrics.yaml index 632c0122e..a379eb94d 100644 --- a/functional_tests/testdata_histogram/expected/v1.29/coredns_metrics.yaml +++ b/functional_tests/testdata_histogram/expected/v1.29/coredns_metrics.yaml @@ -387,5 +387,5 @@ resourceMetrics: stringValue: histogram name: coredns_kubernetes_rest_client_rate_limiter_duration_seconds scope: - name: otelcol/prometheusreceiver - version: v0.105.0 + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver + version: v0.110.0 diff --git a/functional_tests/testdata_histogram/expected/v1.30/api_metrics.yaml b/functional_tests/testdata_histogram/expected/v1.30/api_metrics.yaml index 8c3e23e60..67e0152c3 100644 --- a/functional_tests/testdata_histogram/expected/v1.30/api_metrics.yaml +++ b/functional_tests/testdata_histogram/expected/v1.30/api_metrics.yaml @@ -2,7 +2,7 @@ resourceMetrics: - resource: {} scopeMetrics: - metrics: - - name: apiserver_webhooks_x509_insecure_sha1_total + - name: go_godebug_non_default_behavior_panicnil_events_total sum: aggregationTemporality: 2 dataPoints: @@ -28,7 +28,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -55,9 +55,11 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - gauge: + - name: go_godebug_non_default_behavior_tlsmaxrsasize_events_total + sum: + 
aggregationTemporality: 2 dataPoints: - - asDouble: 2.0671416e+07 + - asDouble: 0 attributes: - key: host.name value: @@ -79,7 +81,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -105,8 +107,8 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_gc_scan_heap_bytes - - name: service_controller_loadbalancer_sync_total + isMonotonic: true + - name: aggregator_discovery_aggregation_count_total sum: aggregationTemporality: 2 dataPoints: @@ -132,7 +134,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -159,11 +161,11 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - name: apiserver_storage_data_key_generation_failures_total + - name: go_gc_heap_allocs_bytes_total sum: aggregationTemporality: 2 dataPoints: - - asDouble: 0 + - asDouble: 8.669548e+07 attributes: - key: host.name value: @@ -185,7 +187,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -212,9 +214,11 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - gauge: + - name: go_gc_heap_tiny_allocs_objects_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 104800 + - asDouble: 52737 attributes: - key: host.name value: @@ -236,7 +240,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -262,10 +266,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memory_classes_metadata_mspan_free_bytes - - gauge: + isMonotonic: true + - name: go_memstats_frees_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 1.72445037276e+09 + - asDouble: 562544 attributes: - key: host.name value: @@ -287,7 +293,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -313,12 +319,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: process_start_time_seconds - - name: service_controller_nodesync_error_total - sum: - aggregationTemporality: 2 + isMonotonic: true + - gauge: dataPoints: - - asDouble: 0 + - asDouble: 2.509844e+07 attributes: - key: host.name value: @@ -340,7 +344,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -366,10 +370,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true + name: go_memstats_heap_alloc_bytes - gauge: dataPoints: - - asDouble: 2602 + - asDouble: 1.048576e+06 attributes: - key: host.name value: @@ -391,7 +395,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -417,12 +421,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: 
scrape_samples_scraped - - name: apiserver_audit_event_total - sum: - aggregationTemporality: 2 + name: process_max_fds + - gauge: dataPoints: - - asDouble: 0 + - asDouble: 1.72739364224e+09 attributes: - key: host.name value: @@ -444,7 +446,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -470,8 +472,8 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: ephemeral_volume_controller_create_total + name: process_start_time_seconds + - name: go_godebug_non_default_behavior_gocachetest_events_total sum: aggregationTemporality: 2 dataPoints: @@ -497,7 +499,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -524,11 +526,11 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - name: go_gc_cycles_automatic_gc_cycles_total + - name: go_godebug_non_default_behavior_gotypesalias_events_total sum: aggregationTemporality: 2 dataPoints: - - asDouble: 13 + - asDouble: 0 attributes: - key: host.name value: @@ -550,7 +552,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -577,9 +579,11 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - gauge: + - name: go_godebug_non_default_behavior_multipartmaxheaders_events_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 0.023497814 + - asDouble: 0 attributes: - key: host.name value: @@ 
-601,7 +605,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -627,10 +631,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: scrape_duration_seconds + isMonotonic: true - gauge: dataPoints: - - asDouble: 2.507076e+07 + - asDouble: 3.4660352e+07 attributes: - key: host.name value: @@ -652,7 +656,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -678,10 +682,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_heap_alloc_bytes + name: go_memstats_heap_inuse_bytes - gauge: dataPoints: - - asDouble: 7 + - asDouble: 685440 attributes: - key: host.name value: @@ -703,7 +707,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -729,14 +733,11 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_threads + name: go_memstats_mspan_sys_bytes - gauge: dataPoints: - - asDouble: 256 + - asDouble: 714233 attributes: - - key: clusterCIDR - value: - stringValue: 10.244.0.0/16 - key: host.name value: stringValue: kind-control-plane @@ -757,7 +758,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -783,12 +784,12 @@ resourceMetrics: value: stringValue: 
https timeUnixNano: "1000000" - name: node_ipam_controller_cirdset_max_cidrs - - name: authentication_token_cache_request_total + name: go_memstats_other_sys_bytes + - name: registered_metrics_total sum: aggregationTemporality: 2 dataPoints: - - asDouble: 1 + - asDouble: 141 attributes: - key: host.name value: @@ -810,7 +811,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -832,19 +833,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: status + - key: stability_level value: - stringValue: miss + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: disabled_metrics_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 0 + - asDouble: 4 attributes: - key: host.name value: @@ -866,7 +862,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -888,16 +884,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stability_level + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_godebug_non_default_behavior_tlsmaxrsasize_events_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 0 + - asDouble: 11 attributes: - key: host.name value: @@ -919,7 +913,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 
@@ -941,15 +935,18 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stability_level + value: + stringValue: STABLE - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 8.950616e+06 + - asDouble: 2 attributes: + - key: deprecated_version + value: + stringValue: 1.30.0 - key: host.name value: stringValue: kind-control-plane @@ -970,7 +967,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -992,16 +989,17 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stability_level + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - name: go_memory_classes_heap_unused_bytes - - name: authenticated_user_requests - sum: - aggregationTemporality: 2 + isMonotonic: true + - gauge: dataPoints: - - asDouble: 14 + - asDouble: 1 attributes: - key: host.name value: @@ -1023,7 +1021,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1048,16 +1046,11 @@ resourceMetrics: - key: url.scheme value: stringValue: https - - key: username - value: - stringValue: other timeUnixNano: "1000000" - isMonotonic: true - - name: go_cpu_classes_scavenge_background_cpu_seconds_total - sum: - aggregationTemporality: 2 + name: up + - gauge: dataPoints: - - asDouble: 1.81e-07 + - asDouble: 3 attributes: - key: host.name value: @@ -1079,7 +1072,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1105,12 +1098,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_gc_cycles_total_gc_cycles_total + name: rest_client_transport_cache_entries + - name: go_gc_duration_seconds_count sum: aggregationTemporality: 2 dataPoints: - - asDouble: 13 + - asInt: "13" attributes: - key: host.name value: @@ -1132,7 +1125,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1159,11 +1152,11 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - name: go_godebug_non_default_behavior_multipartmaxheaders_events_total + - name: go_gc_duration_seconds_sum sum: aggregationTemporality: 2 dataPoints: - - asDouble: 0 + - asDouble: 0.002359035 attributes: - key: host.name value: @@ -1185,7 +1178,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1212,11 +1205,9 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - name: go_godebug_non_default_behavior_gocachehash_events_total - sum: - aggregationTemporality: 2 + - gauge: dataPoints: - - asDouble: 0 + - asDouble: 1.5108e-05 attributes: - key: host.name value: @@ -1238,7 +1229,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1248,6 +1239,9 @@ resourceMetrics: 
- key: os.type value: stringValue: linux + - key: quantile + value: + stringValue: "0" - key: server.address value: stringValue: 172.18.0.2 @@ -1264,10 +1258,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 5.7627912e+07 + - asDouble: 7.7065e-05 attributes: - key: host.name value: @@ -1289,7 +1280,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1299,6 +1290,9 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: quantile + value: + stringValue: "0.25" - key: server.address value: stringValue: 172.18.0.2 @@ -1315,12 +1309,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_sys_bytes - - name: authentication_attempts - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 14 + - asDouble: 0.000178115 attributes: - key: host.name value: @@ -1342,7 +1331,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1352,9 +1341,9 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: result + - key: quantile value: - stringValue: success + stringValue: "0.5" - key: server.address value: stringValue: 172.18.0.2 @@ -1371,12 +1360,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_cpu_classes_scavenge_total_cpu_seconds_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 3.51e-07 + - asDouble: 0.000251013 attributes: - key: host.name value: @@ -1398,7 +1382,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - 
key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1408,6 +1392,9 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: quantile + value: + stringValue: "0.75" - key: server.address value: stringValue: 172.18.0.2 @@ -1424,12 +1411,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: apiserver_storage_envelope_transformation_cache_misses_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 0 + - asDouble: 0.000400865 attributes: - key: host.name value: @@ -1451,7 +1433,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1461,6 +1443,9 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: quantile + value: + stringValue: "1" - key: server.address value: stringValue: 172.18.0.2 @@ -1477,10 +1462,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: + name: go_gc_duration_seconds_quantile + - name: go_godebug_non_default_behavior_http2client_events_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 1221 + - asDouble: 0 attributes: - key: host.name value: @@ -1502,7 +1489,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1528,10 +1515,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_goroutines - - gauge: + isMonotonic: true + - name: go_godebug_non_default_behavior_multipartmaxparts_events_total + sum: + aggregationTemporality: 
2 dataPoints: - - asDouble: 3.4021376e+07 + - asDouble: 0 attributes: - key: host.name value: @@ -1553,7 +1542,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1579,12 +1568,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_heap_inuse_bytes - - name: go_sync_mutex_wait_total_seconds_total + isMonotonic: true + - name: go_godebug_non_default_behavior_tls10server_events_total sum: aggregationTemporality: 2 dataPoints: - - asDouble: 96.998484528 + - asDouble: 0 attributes: - key: host.name value: @@ -1606,7 +1595,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1633,15 +1622,10 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - name: root_ca_cert_publisher_sync_total - sum: - aggregationTemporality: 2 + - gauge: dataPoints: - - asDouble: 6 + - asDouble: 6.291456e+06 attributes: - - key: code - value: - stringValue: "200" - key: host.name value: stringValue: kind-control-plane @@ -1662,7 +1646,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1688,10 +1672,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true + name: go_memory_classes_heap_stacks_bytes - gauge: dataPoints: - - asDouble: 5.1190152e+07 + - asDouble: 7 attributes: - key: host.name value: @@ -1713,7 +1697,7 @@ resourceMetrics: stringValue: 
kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1739,12 +1723,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_gc_heap_goal_bytes - - name: go_cpu_classes_gc_mark_idle_cpu_seconds_total - sum: - aggregationTemporality: 2 + name: go_threads + - gauge: dataPoints: - - asDouble: 0.134754001 + - asDouble: 0.021050625 attributes: - key: host.name value: @@ -1766,7 +1748,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1792,10 +1774,8 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_godebug_non_default_behavior_multipathtcp_events_total - sum: - aggregationTemporality: 2 + name: scrape_duration_seconds + - gauge: dataPoints: - asDouble: 0 attributes: @@ -1819,7 +1799,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1845,10 +1825,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true + name: go_gc_limiter_last_enabled_gc_cycle - gauge: dataPoints: - - asDouble: 100 + - asDouble: 131040 attributes: - key: host.name value: @@ -1870,7 +1850,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -1896,13 +1876,29 @@ resourceMetrics: value: 
stringValue: https timeUnixNano: "1000000" - name: go_gc_gogc_percent - - name: go_godebug_non_default_behavior_httpmuxgo121_events_total - sum: - aggregationTemporality: 2 + name: go_memory_classes_metadata_mspan_free_bytes + - gauge: dataPoints: - - asDouble: 0 + - asDouble: 1 attributes: + - key: build_date + value: + stringValue: "2024-05-13T22:00:36Z" + - key: compiler + value: + stringValue: gc + - key: git_commit + value: + stringValue: 7c48c2bd72b9bf5c44d21d7338cc7bea77d0ad2a + - key: git_tree_state + value: + stringValue: clean + - key: git_version + value: + stringValue: v1.30.0 + - key: go_version + value: + stringValue: go1.22.2 - key: host.name value: stringValue: kind-control-plane @@ -1923,7 +1919,13 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: major + value: + stringValue: "1" + - key: minor + value: + stringValue: "30" - key: net.host.name value: stringValue: 172.18.0.2 @@ -1933,6 +1935,9 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: platform + value: + stringValue: linux/amd64 - key: server.address value: stringValue: 172.18.0.2 @@ -1949,12 +1954,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: hidden_metrics_total - sum: - aggregationTemporality: 2 + name: kubernetes_build_info + - gauge: dataPoints: - - asDouble: 0 + - asDouble: 100 attributes: - key: host.name value: @@ -1976,7 +1979,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -2002,12 +2005,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: retroactive_storageclass_errors_total - sum: 
- aggregationTemporality: 2 + name: node_collector_zone_health + - gauge: dataPoints: - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -2029,7 +2030,13 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: manager + value: + stringValue: kube-controller-manager + - key: name + value: + stringValue: nodeipam - key: net.host.name value: stringValue: 172.18.0.2 @@ -2055,10 +2062,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: + name: running_managed_controllers + - name: endpoint_slice_controller_changes + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 6.258688e+06 + - asDouble: 3 attributes: - key: host.name value: @@ -2080,13 +2089,16 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 - key: net.host.port value: stringValue: "10257" + - key: operation + value: + stringValue: create - key: os.type value: stringValue: linux @@ -2106,12 +2118,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_stack_sys_bytes - - name: aggregator_discovery_aggregation_count_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 0 + - asDouble: 6 attributes: - key: host.name value: @@ -2133,13 +2140,16 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 - key: net.host.port value: stringValue: "10257" + - key: operation + value: + stringValue: update - key: os.type value: 
stringValue: linux @@ -2162,26 +2172,8 @@ resourceMetrics: isMonotonic: true - gauge: dataPoints: - - asDouble: 1 + - asDouble: 1.493567e+06 attributes: - - key: build_date - value: - stringValue: "2024-05-13T22:00:36Z" - - key: compiler - value: - stringValue: gc - - key: git_commit - value: - stringValue: 7c48c2bd72b9bf5c44d21d7338cc7bea77d0ad2a - - key: git_tree_state - value: - stringValue: clean - - key: git_version - value: - stringValue: v1.30.0 - - key: go_version - value: - stringValue: go1.22.2 - key: host.name value: stringValue: kind-control-plane @@ -2202,13 +2194,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: major - value: - stringValue: "1" - - key: minor - value: - stringValue: "30" + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -2218,9 +2204,6 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: platform - value: - stringValue: linux/amd64 - key: server.address value: stringValue: 172.18.0.2 @@ -2237,10 +2220,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: kubernetes_build_info + name: go_memstats_buck_hash_sys_bytes - gauge: dataPoints: - - asDouble: 100 + - asDouble: 1.20082432e+08 attributes: - key: host.name value: @@ -2262,7 +2245,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -2288,8 +2271,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: node_collector_zone_health - - gauge: + name: process_resident_memory_bytes + - name: service_controller_loadbalancer_sync_total + sum: + aggregationTemporality: 2 dataPoints: - asDouble: 0 attributes: @@ -2313,7 +2298,7 @@ resourceMetrics: 
stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -2339,10 +2324,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_gc_limiter_last_enabled_gc_cycle + isMonotonic: true - gauge: dataPoints: - - asDouble: 6.258688e+06 + - asDouble: 0 attributes: - key: host.name value: @@ -2364,7 +2349,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -2390,12 +2375,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_stack_inuse_bytes - - name: ephemeral_volume_controller_create_failures_total + name: apiserver_envelope_encryption_dek_cache_fill_percent + - name: go_cpu_classes_scavenge_total_cpu_seconds_total sum: aggregationTemporality: 2 dataPoints: - - asDouble: 0 + - asDouble: 3.01e-07 attributes: - key: host.name value: @@ -2417,7 +2402,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -2444,11 +2429,11 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - name: go_cpu_classes_gc_total_cpu_seconds_total + - name: go_gc_cycles_forced_gc_cycles_total sum: aggregationTemporality: 2 dataPoints: - - asDouble: 0.220271843 + - asDouble: 0 attributes: - key: host.name value: @@ -2470,7 +2455,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 
514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -2497,11 +2482,9 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - name: go_godebug_non_default_behavior_tlsrsakex_events_total - sum: - aggregationTemporality: 2 + - gauge: dataPoints: - - asDouble: 0 + - asDouble: 5.1061512e+07 attributes: - key: host.name value: @@ -2523,7 +2506,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -2549,10 +2532,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true + name: go_gc_heap_goal_bytes - gauge: dataPoints: - - asDouble: 4 + - asDouble: 554400 attributes: - key: host.name value: @@ -2574,7 +2557,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -2600,10 +2583,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: endpoint_slice_controller_endpoints_desired - - gauge: + name: go_memstats_mspan_inuse_bytes + - name: go_cpu_classes_idle_cpu_seconds_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 1.009921e+06 + - asDouble: 553.231358484 attributes: - key: host.name value: @@ -2625,7 +2610,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -2651,10 +2636,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_other_sys_bytes - - gauge: + isMonotonic: true + - 
name: go_cpu_classes_scavenge_assist_cpu_seconds_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 0 + - asDouble: 1.61e-07 attributes: - key: host.name value: @@ -2676,10 +2663,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: ClusterRoleAggregator + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -2705,6 +2689,11 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" + isMonotonic: true + - name: go_godebug_non_default_behavior_http2server_events_total + sum: + aggregationTemporality: 2 + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -2727,10 +2716,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: DynamicCABundle-client-ca-bundle + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -2756,6 +2742,11 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" + isMonotonic: true + - name: go_godebug_non_default_behavior_tlsrsakex_events_total + sum: + aggregationTemporality: 2 + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -2778,10 +2769,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: DynamicCABundle-csr-controller + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -2807,7 +2795,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 4.12528e+06 attributes: - key: host.name value: @@ -2829,10 +2820,7 @@ resourceMetrics: 
stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: DynamicCABundle-request-header + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -2858,7 +2846,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + name: go_memory_classes_metadata_other_bytes + - gauge: + dataPoints: + - asDouble: 4800 attributes: - key: host.name value: @@ -2880,10 +2871,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: DynamicServingCertificateController + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -2909,7 +2897,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + name: go_memstats_mcache_inuse_bytes + - gauge: + dataPoints: + - asDouble: 1 attributes: - key: host.name value: @@ -2931,10 +2922,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: bootstrap_signer_queue + stringValue: APIListChunking - key: net.host.name value: stringValue: 172.18.0.2 @@ -2960,7 +2951,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -2982,10 +2973,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: certificate + stringValue: APIPriorityAndFairness - key: net.host.name value: stringValue: 
172.18.0.2 @@ -3011,7 +3002,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3033,10 +3024,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: claims + stringValue: AdmissionWebhookMatchConditions - key: net.host.name value: stringValue: 172.18.0.2 @@ -3062,7 +3053,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3084,10 +3075,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: cronjob + stringValue: AggregatedDiscoveryEndpoint - key: net.host.name value: stringValue: 172.18.0.2 @@ -3113,7 +3104,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3135,10 +3126,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: daemonset + stringValue: CPUManager - key: net.host.name value: stringValue: 172.18.0.2 @@ -3164,7 +3155,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3186,10 +3177,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: deployment + 
stringValue: CSINodeExpandSecret - key: net.host.name value: stringValue: 172.18.0.2 @@ -3215,7 +3206,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3237,10 +3228,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: disruption + stringValue: CloudDualStackNodeIPs - key: net.host.name value: stringValue: 172.18.0.2 @@ -3266,7 +3257,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3288,10 +3279,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: disruption_recheck + stringValue: ConsistentHTTPGetHandlers - key: net.host.name value: stringValue: 172.18.0.2 @@ -3317,7 +3308,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3339,10 +3330,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: endpoint + stringValue: CustomResourceValidationExpressions - key: net.host.name value: stringValue: 172.18.0.2 @@ -3368,7 +3359,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3390,10 +3381,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + 
stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: endpoint_slice + stringValue: EfficientWatchResumption - key: net.host.name value: stringValue: 172.18.0.2 @@ -3419,7 +3410,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3441,10 +3432,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: endpoint_slice_mirroring + stringValue: ExecProbeTimeout - key: net.host.name value: stringValue: 172.18.0.2 @@ -3470,7 +3461,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3492,10 +3483,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ephemeral_volume + stringValue: HPAContainerMetrics - key: net.host.name value: stringValue: 172.18.0.2 @@ -3521,7 +3512,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3543,10 +3534,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: garbage_collector_attempt_to_delete + stringValue: JobReadyPods - key: net.host.name value: stringValue: 172.18.0.2 @@ -3572,7 +3563,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3594,10 +3585,10 @@ resourceMetrics: stringValue: 
kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: garbage_collector_attempt_to_orphan + stringValue: KMSv2 - key: net.host.name value: stringValue: 172.18.0.2 @@ -3623,7 +3614,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3645,10 +3636,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: garbage_collector_graph_changes + stringValue: KMSv2KDF - key: net.host.name value: stringValue: 172.18.0.2 @@ -3674,7 +3665,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3696,10 +3687,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: horizontalpodautoscaler + stringValue: LegacyServiceAccountTokenCleanUp - key: net.host.name value: stringValue: 172.18.0.2 @@ -3725,7 +3716,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3747,10 +3738,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: job + stringValue: MinDomainsInPodTopologySpread - key: net.host.name value: stringValue: 172.18.0.2 @@ -3776,7 +3767,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - 
asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3798,10 +3789,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: job_orphan_pod + stringValue: NewVolumeManagerReconstruction - key: net.host.name value: stringValue: 172.18.0.2 @@ -3827,7 +3818,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3849,10 +3840,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: namespace + stringValue: NodeOutOfServiceVolumeDetach - key: net.host.name value: stringValue: 172.18.0.2 @@ -3878,7 +3869,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3900,10 +3891,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: node + stringValue: PodHostIPs - key: net.host.name value: stringValue: 172.18.0.2 @@ -3929,7 +3920,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -3951,10 +3942,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: node_lifecycle_controller + stringValue: PodSchedulingReadiness - key: net.host.name value: stringValue: 172.18.0.2 @@ 
-3980,7 +3971,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4002,10 +3993,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: node_lifecycle_controller_pods + stringValue: ReadWriteOncePod - key: net.host.name value: stringValue: 172.18.0.2 @@ -4031,7 +4022,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4053,10 +4044,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: noexec_taint_node + stringValue: RemainingItemCount - key: net.host.name value: stringValue: 172.18.0.2 @@ -4082,7 +4073,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4104,10 +4095,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: noexec_taint_pod + stringValue: ServerSideApply - key: net.host.name value: stringValue: 172.18.0.2 @@ -4133,7 +4124,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4155,10 +4146,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: 
orphaned_pods_nodes + stringValue: ServerSideFieldValidation - key: net.host.name value: stringValue: 172.18.0.2 @@ -4184,7 +4175,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4206,10 +4197,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: pvcprotection + stringValue: ServiceNodePortStaticSubrange - key: net.host.name value: stringValue: 172.18.0.2 @@ -4235,7 +4226,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4257,10 +4248,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: pvcs + stringValue: StableLoadBalancerNodeSet - key: net.host.name value: stringValue: 172.18.0.2 @@ -4286,7 +4277,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4308,10 +4299,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: pvprotection + stringValue: ValidatingAdmissionPolicy - key: net.host.name value: stringValue: 172.18.0.2 @@ -4337,7 +4328,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4359,10 +4350,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: replicaset + stringValue: WatchBookmark - key: net.host.name value: stringValue: 172.18.0.2 @@ -4388,7 +4379,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4410,10 +4401,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: replicationmanager + stringValue: ZeroLimitedNominalConcurrencyShares - key: net.host.name value: stringValue: 172.18.0.2 @@ -4439,7 +4430,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4461,10 +4452,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: resource_quota_controller_resource_changes + stringValue: APIResponseCompression - key: net.host.name value: stringValue: 172.18.0.2 @@ -4486,11 +4477,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4512,10 +4506,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: resourcequota_primary + stringValue: APIServerIdentity - key: net.host.name value: stringValue: 172.18.0.2 @@ -4537,11 +4531,14 @@ resourceMetrics: 
- key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4563,10 +4560,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: resourcequota_priority + stringValue: APIServerTracing - key: net.host.name value: stringValue: 172.18.0.2 @@ -4588,11 +4585,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4614,10 +4614,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: root_ca_cert_publisher + stringValue: APIServingWithRoutine - key: net.host.name value: stringValue: 172.18.0.2 @@ -4639,6 +4639,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -4665,10 +4668,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: service + stringValue: AllAlpha - key: net.host.name value: stringValue: 172.18.0.2 @@ -4690,6 +4693,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https 
@@ -4716,10 +4722,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: serviceaccount + stringValue: AllBeta - key: net.host.name value: stringValue: 172.18.0.2 @@ -4741,6 +4747,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -4767,10 +4776,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: serviceaccount_tokens_secret + stringValue: AllowServiceLBStatusOnNonLB - key: net.host.name value: stringValue: 172.18.0.2 @@ -4792,11 +4801,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: DEPRECATED - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4818,10 +4830,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: serviceaccount_tokens_service + stringValue: AnyVolumeDataSource - key: net.host.name value: stringValue: 172.18.0.2 @@ -4843,11 +4855,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4869,10 +4884,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid 
value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: stale_pod_disruption + stringValue: AppArmor - key: net.host.name value: stringValue: 172.18.0.2 @@ -4894,11 +4909,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -4920,10 +4938,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: statefulset + stringValue: AppArmorFields - key: net.host.name value: stringValue: 172.18.0.2 @@ -4945,6 +4963,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -4971,10 +4992,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: token_cleaner + stringValue: CPUManagerPolicyAlphaOptions - key: net.host.name value: stringValue: 172.18.0.2 @@ -4996,11 +5017,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -5022,10 +5046,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: 
ttl_jobs_to_delete + stringValue: CPUManagerPolicyBetaOptions - key: net.host.name value: stringValue: 172.18.0.2 @@ -5047,11 +5071,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -5073,10 +5100,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ttlcontroller + stringValue: CPUManagerPolicyOptions - key: net.host.name value: stringValue: 172.18.0.2 @@ -5098,11 +5125,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -5124,10 +5154,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: validatingadmissionpolicy-status + stringValue: CRDValidationRatcheting - key: net.host.name value: stringValue: 172.18.0.2 @@ -5149,6 +5179,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -5175,10 +5208,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: volume_expand + stringValue: CSIMigrationPortworx - key: net.host.name value: stringValue: 172.18.0.2 @@ -5200,6 
+5233,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -5226,10 +5262,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: volumes + stringValue: CSIMigrationRBD - key: net.host.name value: stringValue: 172.18.0.2 @@ -5251,14 +5287,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: DEPRECATED - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - name: workqueue_unfinished_work_seconds - - gauge: - dataPoints: - - asDouble: 2.4437976e+07 + - asDouble: 0 attributes: - key: host.name value: @@ -5280,7 +5316,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: CSIVolumeHealth - key: net.host.name value: stringValue: 172.18.0.2 @@ -5302,14 +5341,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - name: go_gc_heap_live_bytes - - gauge: - dataPoints: - - asDouble: 2.2985616e+07 + - asDouble: 0 attributes: - key: host.name value: @@ -5331,7 +5370,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: CloudControllerManagerWebhook - key: net.host.name value: stringValue: 172.18.0.2 @@ -5353,14 +5395,14 @@ resourceMetrics: - key: service.name 
value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - name: go_gc_scan_total_bytes - - gauge: - dataPoints: - - asDouble: 2.507076e+07 + - asDouble: 0 attributes: - key: host.name value: @@ -5382,7 +5424,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: ClusterTrustBundle - key: net.host.name value: stringValue: 172.18.0.2 @@ -5404,14 +5449,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - name: go_memory_classes_heap_objects_bytes - - gauge: - dataPoints: - - asDouble: 4800 + - asDouble: 0 attributes: - key: host.name value: @@ -5433,7 +5478,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: ClusterTrustBundleProjection - key: net.host.name value: stringValue: 172.18.0.2 @@ -5455,16 +5503,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_mcache_inuse_bytes - - name: go_cpu_classes_total_cpu_seconds_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 491.177804168 + - asDouble: 1 attributes: - key: host.name value: @@ -5486,7 +5532,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - 
key: name + value: + stringValue: ComponentSLIs - key: net.host.name value: stringValue: 172.18.0.2 @@ -5508,15 +5557,13 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: workqueue_retries_total - sum: - aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -5539,10 +5586,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ClusterRoleAggregator + stringValue: ConsistentListFromCache - key: net.host.name value: stringValue: 172.18.0.2 @@ -5564,11 +5611,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -5590,10 +5640,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DynamicCABundle-client-ca-bundle + stringValue: ContainerCheckpoint - key: net.host.name value: stringValue: 172.18.0.2 @@ -5615,11 +5665,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -5641,10 +5694,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 
514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DynamicCABundle-csr-controller + stringValue: ContextualLogging - key: net.host.name value: stringValue: 172.18.0.2 @@ -5666,11 +5719,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -5692,10 +5748,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DynamicCABundle-request-header + stringValue: CronJobsScheduledAnnotation - key: net.host.name value: stringValue: 172.18.0.2 @@ -5717,6 +5773,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -5743,10 +5802,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DynamicServingCertificateController + stringValue: CrossNamespaceVolumeDataSource - key: net.host.name value: stringValue: 172.18.0.2 @@ -5768,6 +5827,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -5794,10 +5856,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: bootstrap_signer_queue + stringValue: CustomCPUCFSQuotaPeriod - key: net.host.name value: 
stringValue: 172.18.0.2 @@ -5819,6 +5881,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -5845,10 +5910,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: certificate + stringValue: CustomResourceFieldSelectors - key: net.host.name value: stringValue: 172.18.0.2 @@ -5870,6 +5935,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -5896,10 +5964,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: cronjob + stringValue: DefaultHostNetworkHostPortsInPodTemplates - key: net.host.name value: stringValue: 172.18.0.2 @@ -5921,11 +5989,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: DEPRECATED - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -5947,10 +6018,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: daemonset + stringValue: DevicePluginCDIDevices - key: net.host.name value: stringValue: 172.18.0.2 @@ -5972,11 +6043,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https 
timeUnixNano: "1000000" - - asDouble: 21 + - asDouble: 1 attributes: - key: host.name value: @@ -5998,10 +6072,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: deployment + stringValue: DisableCloudProviders - key: net.host.name value: stringValue: 172.18.0.2 @@ -6023,11 +6097,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -6049,10 +6126,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: disruption + stringValue: DisableKubeletCloudCredentialProviders - key: net.host.name value: stringValue: 172.18.0.2 @@ -6074,6 +6151,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -6100,10 +6180,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: disruption_recheck + stringValue: DisableNodeKubeProxyVersion - key: net.host.name value: stringValue: 172.18.0.2 @@ -6125,11 +6205,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 10 + - asDouble: 0 attributes: - key: host.name value: @@ -6151,10 +6234,10 @@ 
resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: endpoint + stringValue: DynamicResourceAllocation - key: net.host.name value: stringValue: 172.18.0.2 @@ -6176,11 +6259,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 16 + - asDouble: 1 attributes: - key: host.name value: @@ -6202,10 +6288,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: endpoint_slice + stringValue: ElasticIndexedJob - key: net.host.name value: stringValue: 172.18.0.2 @@ -6227,6 +6313,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -6253,10 +6342,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: endpoint_slice_mirroring + stringValue: EventedPLEG - key: net.host.name value: stringValue: 172.18.0.2 @@ -6278,11 +6367,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: url.scheme + - key: stage + value: + stringValue: ALPHA + - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -6304,10 +6396,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ephemeral_volume + stringValue: GracefulNodeShutdown - key: net.host.name value: stringValue: 172.18.0.2 @@ -6329,11 +6421,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -6355,10 +6450,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: garbage_collector_attempt_to_delete + stringValue: GracefulNodeShutdownBasedOnPodPriority - key: net.host.name value: stringValue: 172.18.0.2 @@ -6380,6 +6475,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -6406,10 +6504,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: garbage_collector_attempt_to_orphan + stringValue: HPAScaleToZero - key: net.host.name value: stringValue: 172.18.0.2 @@ -6431,6 +6529,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -6457,10 +6558,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: garbage_collector_graph_changes + stringValue: 
HonorPVReclaimPolicy - key: net.host.name value: stringValue: 172.18.0.2 @@ -6482,11 +6583,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -6508,10 +6612,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: horizontalpodautoscaler + stringValue: ImageMaximumGCAge - key: net.host.name value: stringValue: 172.18.0.2 @@ -6533,6 +6637,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -6559,10 +6666,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: job + stringValue: InPlacePodVerticalScaling - key: net.host.name value: stringValue: 172.18.0.2 @@ -6584,6 +6691,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -6610,10 +6720,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: job_orphan_pod + stringValue: InTreePluginAWSUnregister - key: net.host.name value: stringValue: 172.18.0.2 @@ -6635,6 +6745,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: 
url.scheme value: stringValue: https @@ -6661,10 +6774,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: namespace + stringValue: InTreePluginAzureDiskUnregister - key: net.host.name value: stringValue: 172.18.0.2 @@ -6686,6 +6799,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -6712,10 +6828,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: node + stringValue: InTreePluginAzureFileUnregister - key: net.host.name value: stringValue: 172.18.0.2 @@ -6737,6 +6853,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -6763,10 +6882,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: node_lifecycle_controller_pods + stringValue: InTreePluginGCEUnregister - key: net.host.name value: stringValue: 172.18.0.2 @@ -6788,6 +6907,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -6814,10 +6936,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: 
orphaned_pods_nodes + stringValue: InTreePluginOpenStackUnregister - key: net.host.name value: stringValue: 172.18.0.2 @@ -6839,6 +6961,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -6865,10 +6990,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: pvcprotection + stringValue: InTreePluginPortworxUnregister - key: net.host.name value: stringValue: 172.18.0.2 @@ -6890,6 +7015,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -6916,10 +7044,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: pvcs + stringValue: InTreePluginRBDUnregister - key: net.host.name value: stringValue: 172.18.0.2 @@ -6941,6 +7069,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: DEPRECATED - key: url.scheme value: stringValue: https @@ -6967,10 +7098,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: pvprotection + stringValue: InTreePluginvSphereUnregister - key: net.host.name value: stringValue: 172.18.0.2 @@ -6992,6 +7123,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ 
-7018,10 +7152,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: replicaset + stringValue: InformerResourceVersion - key: net.host.name value: stringValue: 172.18.0.2 @@ -7043,11 +7177,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -7069,10 +7206,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: replicationmanager + stringValue: JobBackoffLimitPerIndex - key: net.host.name value: stringValue: 172.18.0.2 @@ -7094,6 +7231,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -7120,10 +7260,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: resource_quota_controller_resource_changes + stringValue: JobManagedBy - key: net.host.name value: stringValue: 172.18.0.2 @@ -7145,11 +7285,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -7171,10 +7314,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid 
value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: resourcequota_primary + stringValue: JobPodFailurePolicy - key: net.host.name value: stringValue: 172.18.0.2 @@ -7196,11 +7339,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -7222,10 +7368,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: resourcequota_priority + stringValue: JobPodReplacementPolicy - key: net.host.name value: stringValue: 172.18.0.2 @@ -7247,6 +7393,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -7273,10 +7422,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: root_ca_cert_publisher + stringValue: JobSuccessPolicy - key: net.host.name value: stringValue: 172.18.0.2 @@ -7298,6 +7447,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -7324,10 +7476,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: service + stringValue: KMSv1 - key: net.host.name value: stringValue: 
172.18.0.2 @@ -7349,11 +7501,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: DEPRECATED - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -7375,10 +7530,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: serviceaccount + stringValue: KubeProxyDrainingTerminatingNodes - key: net.host.name value: stringValue: 172.18.0.2 @@ -7400,6 +7555,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -7426,10 +7584,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: serviceaccount_tokens_secret + stringValue: KubeletCgroupDriverFromCRI - key: net.host.name value: stringValue: 172.18.0.2 @@ -7451,6 +7609,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -7477,10 +7638,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: serviceaccount_tokens_service + stringValue: KubeletInUserNamespace - key: net.host.name value: stringValue: 172.18.0.2 @@ -7502,6 +7663,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: 
stringValue: https @@ -7528,10 +7692,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: stale_pod_disruption + stringValue: KubeletPodResourcesDynamicResources - key: net.host.name value: stringValue: 172.18.0.2 @@ -7553,6 +7717,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -7579,10 +7746,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: statefulset + stringValue: KubeletPodResourcesGet - key: net.host.name value: stringValue: 172.18.0.2 @@ -7604,11 +7771,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -7630,10 +7800,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: token_cleaner + stringValue: KubeletSeparateDiskGC - key: net.host.name value: stringValue: 172.18.0.2 @@ -7655,11 +7825,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -7681,10 +7854,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: 
k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ttl_jobs_to_delete + stringValue: KubeletTracing - key: net.host.name value: stringValue: 172.18.0.2 @@ -7706,11 +7879,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -7732,10 +7908,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ttlcontroller + stringValue: LoadBalancerIPMode - key: net.host.name value: stringValue: 172.18.0.2 @@ -7757,6 +7933,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -7783,10 +7962,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: validatingadmissionpolicy-status + stringValue: LocalStorageCapacityIsolationFSQuotaMonitoring - key: net.host.name value: stringValue: 172.18.0.2 @@ -7808,11 +7987,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -7834,10 +8016,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 
514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: volume_expand + stringValue: LogarithmicScaleDown - key: net.host.name value: stringValue: 172.18.0.2 @@ -7859,14 +8041,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 5.1190152e+07 + - asDouble: 0 attributes: - key: host.name value: @@ -7888,7 +8070,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: LoggingAlphaOptions - key: net.host.name value: stringValue: 172.18.0.2 @@ -7910,14 +8095,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_next_gc_bytes - - gauge: - dataPoints: - - asDouble: 1.7244504959129157e+09 + - asDouble: 1 attributes: - key: host.name value: @@ -7939,7 +8124,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: LoggingBetaOptions - key: net.host.name value: stringValue: 172.18.0.2 @@ -7961,14 +8149,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_last_gc_time_seconds - - gauge: - dataPoints: - - asDouble: 1.337700352e+09 + - asDouble: 0 attributes: - key: host.name value: @@ -7990,7 +8178,10 @@ resourceMetrics: stringValue: 
kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: MatchLabelKeysInPodAffinity - key: net.host.name value: stringValue: 172.18.0.2 @@ -8012,16 +8203,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - name: process_virtual_memory_bytes - - name: go_godebug_non_default_behavior_gocachetest_events_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -8043,7 +8232,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: MatchLabelKeysInPodTopologySpread - key: net.host.name value: stringValue: 172.18.0.2 @@ -8065,14 +8257,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -8094,10 +8286,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: APIListChunking + stringValue: MaxUnavailableStatefulSet - key: net.host.name value: stringValue: 172.18.0.2 @@ -8119,6 +8311,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -8145,10 +8340,10 @@ 
resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: APIPriorityAndFairness + stringValue: MemoryManager - key: net.host.name value: stringValue: 172.18.0.2 @@ -8170,11 +8365,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -8196,10 +8394,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: AdmissionWebhookMatchConditions + stringValue: MemoryQoS - key: net.host.name value: stringValue: 172.18.0.2 @@ -8221,11 +8419,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -8247,10 +8448,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: AggregatedDiscoveryEndpoint + stringValue: MultiCIDRServiceAllocator - key: net.host.name value: stringValue: 172.18.0.2 @@ -8272,11 +8473,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -8298,10 +8502,10 @@ resourceMetrics: 
stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CPUManager + stringValue: MutatingAdmissionPolicy - key: net.host.name value: stringValue: 172.18.0.2 @@ -8323,11 +8527,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -8349,10 +8556,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CSINodeExpandSecret + stringValue: NFTablesProxyMode - key: net.host.name value: stringValue: 172.18.0.2 @@ -8374,6 +8581,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -8400,10 +8610,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CloudDualStackNodeIPs + stringValue: NodeInclusionPolicyInPodTopologySpread - key: net.host.name value: stringValue: 172.18.0.2 @@ -8425,11 +8635,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -8451,10 +8664,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ConsistentHTTPGetHandlers + stringValue: NodeLogQuery - key: net.host.name value: stringValue: 172.18.0.2 @@ -8476,6 +8689,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -8502,10 +8718,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CustomResourceValidationExpressions + stringValue: NodeSwap - key: net.host.name value: stringValue: 172.18.0.2 @@ -8527,6 +8743,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -8553,10 +8772,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: EfficientWatchResumption + stringValue: OpenAPIEnums - key: net.host.name value: stringValue: 172.18.0.2 @@ -8578,6 +8797,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -8604,10 +8826,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ExecProbeTimeout + stringValue: PDBUnhealthyPodEvictionPolicy - key: net.host.name value: stringValue: 172.18.0.2 @@ -8629,6 +8851,9 @@ resourceMetrics: - key: service.name value: stringValue: 
kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -8655,10 +8880,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: HPAContainerMetrics + stringValue: PersistentVolumeLastPhaseTransitionTime - key: net.host.name value: stringValue: 172.18.0.2 @@ -8680,11 +8905,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -8706,10 +8934,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: JobReadyPods + stringValue: PodAndContainerStatsFromCRI - key: net.host.name value: stringValue: 172.18.0.2 @@ -8731,6 +8959,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -8757,10 +8988,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: KMSv2 + stringValue: PodDeletionCost - key: net.host.name value: stringValue: 172.18.0.2 @@ -8782,6 +9013,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -8808,10 +9042,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: 
k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: KMSv2KDF + stringValue: PodDisruptionConditions - key: net.host.name value: stringValue: 172.18.0.2 @@ -8833,6 +9067,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -8859,10 +9096,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: LegacyServiceAccountTokenCleanUp + stringValue: PodIndexLabel - key: net.host.name value: stringValue: 172.18.0.2 @@ -8884,6 +9121,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -8910,10 +9150,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: MinDomainsInPodTopologySpread + stringValue: PodLifecycleSleepAction - key: net.host.name value: stringValue: 172.18.0.2 @@ -8935,6 +9175,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -8961,10 +9204,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: NewVolumeManagerReconstruction + stringValue: PodReadyToStartContainersCondition - key: net.host.name value: stringValue: 172.18.0.2 @@ -8986,11 
+9229,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -9012,10 +9258,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: NodeOutOfServiceVolumeDetach + stringValue: PortForwardWebsockets - key: net.host.name value: stringValue: 172.18.0.2 @@ -9037,11 +9283,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -9063,10 +9312,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: PodHostIPs + stringValue: ProcMountType - key: net.host.name value: stringValue: 172.18.0.2 @@ -9088,11 +9337,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -9114,10 +9366,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: PodSchedulingReadiness + stringValue: QOSReserved - key: net.host.name value: stringValue: 172.18.0.2 @@ -9139,11 +9391,14 @@ resourceMetrics: - key: 
service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -9165,10 +9420,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ReadWriteOncePod + stringValue: RecoverVolumeExpansionFailure - key: net.host.name value: stringValue: 172.18.0.2 @@ -9190,11 +9445,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -9216,10 +9474,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: RemainingItemCount + stringValue: RecursiveReadOnlyMounts - key: net.host.name value: stringValue: 172.18.0.2 @@ -9241,11 +9499,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -9267,10 +9528,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ServerSideApply + stringValue: RelaxedEnvironmentVariableValidation - key: net.host.name value: stringValue: 172.18.0.2 @@ -9292,11 +9553,14 @@ resourceMetrics: - key: service.name 
value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -9318,10 +9582,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ServerSideFieldValidation + stringValue: RetryGenerateName - key: net.host.name value: stringValue: 172.18.0.2 @@ -9343,6 +9607,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -9369,10 +9636,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ServiceNodePortStaticSubrange + stringValue: RotateKubeletServerCertificate - key: net.host.name value: stringValue: 172.18.0.2 @@ -9394,11 +9661,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -9420,10 +9690,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: StableLoadBalancerNodeSet + stringValue: RuntimeClassInImageCriApi - key: net.host.name value: stringValue: 172.18.0.2 @@ -9445,11 +9715,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: 
url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -9471,10 +9744,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ValidatingAdmissionPolicy + stringValue: SELinuxMount - key: net.host.name value: stringValue: 172.18.0.2 @@ -9496,6 +9769,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -9522,10 +9798,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: WatchBookmark + stringValue: SELinuxMountReadWriteOncePod - key: net.host.name value: stringValue: 172.18.0.2 @@ -9547,11 +9823,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -9573,10 +9852,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ZeroLimitedNominalConcurrencyShares + stringValue: SchedulerQueueingHints - key: net.host.name value: stringValue: 172.18.0.2 @@ -9598,6 +9877,9 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: stage + value: + stringValue: BETA - key: url.scheme value: stringValue: https @@ -9624,10 +9906,10 @@ resourceMetrics: stringValue: 
kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: APIResponseCompression + stringValue: SeparateCacheWatchRPC - key: net.host.name value: stringValue: 172.18.0.2 @@ -9678,10 +9960,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: APIServerIdentity + stringValue: SeparateTaintEvictionController - key: net.host.name value: stringValue: 172.18.0.2 @@ -9732,10 +10014,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: APIServerTracing + stringValue: ServiceAccountTokenJTI - key: net.host.name value: stringValue: 172.18.0.2 @@ -9764,7 +10046,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -9786,10 +10068,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: APIServingWithRoutine + stringValue: ServiceAccountTokenNodeBinding - key: net.host.name value: stringValue: 172.18.0.2 @@ -9813,12 +10095,12 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: BETA + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -9840,10 +10122,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid 
value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: AllAlpha + stringValue: ServiceAccountTokenNodeBindingValidation - key: net.host.name value: stringValue: 172.18.0.2 @@ -9867,12 +10149,12 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: ALPHA + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -9894,10 +10176,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: AllBeta + stringValue: ServiceAccountTokenPodNodeInfo - key: net.host.name value: stringValue: 172.18.0.2 @@ -9948,10 +10230,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: AllowServiceLBStatusOnNonLB + stringValue: ServiceTrafficDistribution - key: net.host.name value: stringValue: 172.18.0.2 @@ -9975,7 +10257,7 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: DEPRECATED + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -10002,10 +10284,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: AnyVolumeDataSource + stringValue: SidecarContainers - key: net.host.name value: stringValue: 172.18.0.2 @@ -10056,10 +10338,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: AppArmor + stringValue: SizeMemoryBackedVolumes - key: net.host.name value: stringValue: 172.18.0.2 @@ -10110,10 +10392,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: AppArmorFields + stringValue: SkipReadOnlyValidationGCE - key: net.host.name value: stringValue: 172.18.0.2 @@ -10137,12 +10419,12 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: BETA + stringValue: DEPRECATED - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -10164,10 +10446,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CPUManagerPolicyAlphaOptions + stringValue: StatefulSetAutoDeletePVC - key: net.host.name value: stringValue: 172.18.0.2 @@ -10191,7 +10473,7 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: ALPHA + stringValue: BETA - key: url.scheme value: stringValue: https @@ -10218,10 +10500,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CPUManagerPolicyBetaOptions + stringValue: StatefulSetStartOrdinal - key: net.host.name value: stringValue: 172.18.0.2 @@ -10272,10 +10554,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CPUManagerPolicyOptions + stringValue: StorageNamespaceIndex - key: net.host.name value: stringValue: 172.18.0.2 @@ -10304,7 +10586,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -10326,10 +10608,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CRDValidationRatcheting + stringValue: StorageVersionAPI - key: net.host.name value: stringValue: 172.18.0.2 @@ -10353,12 +10635,12 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: BETA + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -10380,10 +10662,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CSIMigrationPortworx + stringValue: StorageVersionHash - key: net.host.name value: stringValue: 172.18.0.2 @@ -10434,10 +10716,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CSIMigrationRBD + stringValue: StorageVersionMigrator - key: net.host.name value: stringValue: 172.18.0.2 @@ -10461,12 +10743,12 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: DEPRECATED + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - 
- asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -10488,10 +10770,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CSIVolumeHealth + stringValue: StructuredAuthenticationConfiguration - key: net.host.name value: stringValue: 172.18.0.2 @@ -10515,12 +10797,12 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: ALPHA + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -10542,10 +10824,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CloudControllerManagerWebhook + stringValue: StructuredAuthorizationConfiguration - key: net.host.name value: stringValue: 172.18.0.2 @@ -10569,12 +10851,12 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: ALPHA + stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -10596,10 +10878,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ClusterTrustBundle + stringValue: TopologyAwareHints - key: net.host.name value: stringValue: 172.18.0.2 @@ -10623,7 +10905,7 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: ALPHA + stringValue: BETA - key: url.scheme value: stringValue: https @@ -10650,10 +10932,10 @@ resourceMetrics: 
stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ClusterTrustBundleProjection + stringValue: TopologyManagerPolicyAlphaOptions - key: net.host.name value: stringValue: 172.18.0.2 @@ -10704,10 +10986,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ComponentSLIs + stringValue: TopologyManagerPolicyBetaOptions - key: net.host.name value: stringValue: 172.18.0.2 @@ -10736,7 +11018,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -10758,10 +11040,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ConsistentListFromCache + stringValue: TopologyManagerPolicyOptions - key: net.host.name value: stringValue: 172.18.0.2 @@ -10785,7 +11067,7 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: ALPHA + stringValue: BETA - key: url.scheme value: stringValue: https @@ -10812,10 +11094,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ContainerCheckpoint + stringValue: TranslateStreamCloseWebsocketRequests - key: net.host.name value: stringValue: 172.18.0.2 @@ -10866,10 +11148,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ContextualLogging + stringValue: UnauthenticatedHTTP2DOSMitigation - key: net.host.name value: stringValue: 172.18.0.2 @@ -10898,7 +11180,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -10920,10 +11202,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CronJobsScheduledAnnotation + stringValue: UnknownVersionInteroperabilityProxy - key: net.host.name value: stringValue: 172.18.0.2 @@ -10947,7 +11229,7 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: BETA + stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -10974,10 +11256,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CrossNamespaceVolumeDataSource + stringValue: UserNamespacesPodSecurityStandards - key: net.host.name value: stringValue: 172.18.0.2 @@ -11028,10 +11310,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CustomCPUCFSQuotaPeriod + stringValue: UserNamespacesSupport - key: net.host.name value: stringValue: 172.18.0.2 @@ -11055,7 +11337,7 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: ALPHA + stringValue: BETA - key: url.scheme value: stringValue: https @@ -11082,10 +11364,10 @@ resourceMetrics: stringValue: 
kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: CustomResourceFieldSelectors + stringValue: VolumeAttributesClass - key: net.host.name value: stringValue: 172.18.0.2 @@ -11136,10 +11418,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DefaultHostNetworkHostPortsInPodTemplates + stringValue: VolumeCapacityPriority - key: net.host.name value: stringValue: 172.18.0.2 @@ -11163,12 +11445,12 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: DEPRECATED + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -11190,10 +11472,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DevicePluginCDIDevices + stringValue: WatchFromStorageWithoutResourceVersion - key: net.host.name value: stringValue: 172.18.0.2 @@ -11222,7 +11504,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -11244,10 +11526,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DisableCloudProviders + stringValue: WatchList - key: net.host.name value: stringValue: 172.18.0.2 @@ -11271,12 +11553,12 @@ resourceMetrics: stringValue: kube-controller-manager - 
key: stage value: - stringValue: BETA + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -11298,10 +11580,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DisableKubeletCloudCredentialProviders + stringValue: WatchListClient - key: net.host.name value: stringValue: 172.18.0.2 @@ -11352,10 +11634,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DisableNodeKubeProxyVersion + stringValue: WinDSR - key: net.host.name value: stringValue: 172.18.0.2 @@ -11384,7 +11666,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -11406,10 +11688,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DynamicResourceAllocation + stringValue: WinOverlay - key: net.host.name value: stringValue: 172.18.0.2 @@ -11433,7 +11715,7 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: ALPHA + stringValue: BETA - key: url.scheme value: stringValue: https @@ -11460,10 +11742,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ElasticIndexedJob + stringValue: WindowsHostNetwork - key: net.host.name value: 
stringValue: 172.18.0.2 @@ -11487,11 +11769,16 @@ resourceMetrics: stringValue: kube-controller-manager - key: stage value: - stringValue: BETA + stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" + name: kubernetes_feature_enabled + - name: disabled_metrics_total + sum: + aggregationTemporality: 2 + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -11514,10 +11801,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: EventedPLEG + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -11539,14 +11823,16 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + isMonotonic: true + - name: go_godebug_non_default_behavior_httplaxcontentlength_events_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 attributes: - key: host.name value: @@ -11568,10 +11854,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: GracefulNodeShutdown + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -11593,14 +11876,16 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + isMonotonic: true + - name: go_godebug_non_default_behavior_x509usefallbackroots_events_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 attributes: - key: host.name value: @@ -11622,10 +11907,7 @@ resourceMetrics: stringValue: 
kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: GracefulNodeShutdownBasedOnPodPriority + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -11647,14 +11929,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 2.509844e+07 attributes: - key: host.name value: @@ -11676,10 +11958,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: HPAScaleToZero + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -11701,13 +11980,15 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" + name: go_memory_classes_heap_objects_bytes + - name: go_godebug_non_default_behavior_execerrdot_events_total + sum: + aggregationTemporality: 2 + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -11730,10 +12011,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: HonorPVReclaimPolicy + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -11755,14 +12033,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + isMonotonic: true + - 
gauge: + dataPoints: + - asDouble: 2.3093568e+07 attributes: - key: host.name value: @@ -11784,10 +12062,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: ImageMaximumGCAge + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -11809,13 +12084,15 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" + name: go_gc_scan_total_bytes + - name: go_godebug_non_default_behavior_tarinsecurepath_events_total + sum: + aggregationTemporality: 2 + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -11838,10 +12115,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: InPlacePodVerticalScaling + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -11863,13 +12137,15 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" + isMonotonic: true + - name: go_godebug_non_default_behavior_x509sha1_events_total + sum: + aggregationTemporality: 2 + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -11892,10 +12168,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: InTreePluginAWSUnregister + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -11917,14 +12190,14 @@ resourceMetrics: - key: service.name value: 
stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 3.104768e+06 attributes: - key: host.name value: @@ -11946,10 +12219,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: InTreePluginAzureDiskUnregister + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -11971,14 +12241,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + name: go_memory_classes_heap_released_bytes + - gauge: + dataPoints: + - asDouble: 10800 attributes: - key: host.name value: @@ -12000,10 +12270,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: InTreePluginAzureFileUnregister + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12025,14 +12292,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + name: go_memory_classes_metadata_mcache_free_bytes + - gauge: + dataPoints: + - asDouble: Infinity attributes: - key: host.name value: @@ -12054,10 +12321,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: InTreePluginGCEUnregister + stringValue: 
514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12079,15 +12343,20 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + name: rest_client_exec_plugin_ttl_seconds + - name: root_ca_cert_publisher_sync_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 6 attributes: + - key: code + value: + stringValue: "200" - key: host.name value: stringValue: kind-control-plane @@ -12108,10 +12377,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: InTreePluginOpenStackUnregister + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12133,13 +12399,15 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" + isMonotonic: true + - name: apiserver_webhooks_x509_insecure_sha1_total + sum: + aggregationTemporality: 2 + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -12162,10 +12430,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: InTreePluginPortworxUnregister + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12187,14 +12452,16 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - name: go_gc_cycles_total_gc_cycles_total + sum: + 
aggregationTemporality: 2 + dataPoints: + - asDouble: 13 attributes: - key: host.name value: @@ -12216,10 +12483,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: InTreePluginRBDUnregister + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12241,13 +12505,15 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: DEPRECATED - key: url.scheme value: stringValue: https timeUnixNano: "1000000" + isMonotonic: true + - name: go_godebug_non_default_behavior_tlsunsafeekm_events_total + sum: + aggregationTemporality: 2 + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -12270,10 +12536,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: InTreePluginvSphereUnregister + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12295,14 +12558,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 15600 attributes: - key: host.name value: @@ -12324,10 +12587,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: InformerResourceVersion + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12349,13 +12609,15 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: 
- stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" + name: go_memstats_mcache_sys_bytes + - name: authentication_token_cache_fetch_total + sum: + aggregationTemporality: 2 + dataPoints: - asDouble: 1 attributes: - key: host.name @@ -12378,10 +12640,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: JobBackoffLimitPerIndex + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12403,14 +12662,17 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage + - key: status value: - stringValue: BETA + stringValue: ok - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 1.7273937822093172e+09 attributes: - key: host.name value: @@ -12432,10 +12694,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: JobManagedBy + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12457,13 +12716,13 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" + name: go_memstats_last_gc_time_seconds + - gauge: + dataPoints: - asDouble: 1 attributes: - key: host.name @@ -12486,10 +12745,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: JobPodFailurePolicy + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ 
-12511,21 +12767,26 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https - timeUnixNano: "1000000" - - asDouble: 1 - attributes: - - key: host.name - value: - stringValue: kind-control-plane - - key: http.scheme + - key: version value: - stringValue: https + stringValue: go1.22.2 + timeUnixNano: "1000000" + name: go_info + - name: go_gc_cycles_automatic_gc_cycles_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 13 + attributes: + - key: host.name + value: + stringValue: kind-control-plane + - key: http.scheme + value: + stringValue: https - key: k8s.cluster.name value: stringValue: sock @@ -12540,10 +12801,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: JobPodReplacementPolicy + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12565,14 +12823,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 9.223372036854776e+18 attributes: - key: host.name value: @@ -12594,10 +12852,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: JobSuccessPolicy + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12619,14 +12874,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + 
name: go_gc_gomemlimit_bytes + - gauge: + dataPoints: + - asDouble: 1.506536e+06 attributes: - key: host.name value: @@ -12648,10 +12903,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: KMSv1 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12673,14 +12925,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: DEPRECATED - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + name: go_gc_scan_stack_bytes + - gauge: + dataPoints: + - asDouble: 0 attributes: - key: host.name value: @@ -12702,10 +12954,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: KubeProxyDrainingTerminatingNodes + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12727,13 +12976,15 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" + name: go_memory_classes_os_stacks_bytes + - name: apiserver_webhooks_x509_missing_san_total + sum: + aggregationTemporality: 2 + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -12756,10 +13007,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: KubeletCgroupDriverFromCRI + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12781,14 +13029,16 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager 
- - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - name: authentication_token_cache_request_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 1 attributes: - key: host.name value: @@ -12810,10 +13060,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: KubeletInUserNamespace + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12835,14 +13082,19 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage + - key: status value: - stringValue: ALPHA + stringValue: miss - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - name: go_cpu_classes_scavenge_background_cpu_seconds_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 1.4e-07 attributes: - key: host.name value: @@ -12864,10 +13116,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: KubeletPodResourcesDynamicResources + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12889,14 +13138,16 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - name: go_gc_heap_allocs_objects_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 631305 attributes: - key: host.name value: @@ -12918,10 +13169,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: KubeletPodResourcesGet + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12943,14 +13191,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 4.8234496e+07 attributes: - key: host.name value: @@ -12972,10 +13220,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: KubeletSeparateDiskGC + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -12997,14 +13242,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + name: go_memstats_heap_sys_bytes + - gauge: + dataPoints: + - asDouble: 6.1560072e+07 attributes: - key: host.name value: @@ -13026,10 +13271,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: KubeletTracing + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -13051,14 +13293,16 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + name: go_memstats_sys_bytes + - name: apiserver_audit_event_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 attributes: - key: host.name value: @@ -13080,10 
+13324,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: LoadBalancerIPMode + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -13105,13 +13346,13 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" + isMonotonic: true + - gauge: + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -13134,10 +13375,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: LocalStorageCapacityIsolationFSQuotaMonitoring + stringValue: ClusterRoleAggregator - key: net.host.name value: stringValue: 172.18.0.2 @@ -13159,14 +13400,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -13188,10 +13426,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: LogarithmicScaleDown + stringValue: DynamicCABundle-client-ca-bundle - key: net.host.name value: stringValue: 172.18.0.2 @@ -13213,9 +13451,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https @@ -13242,10 +13477,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - 
key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: LoggingAlphaOptions + stringValue: DynamicCABundle-csr-controller - key: net.host.name value: stringValue: 172.18.0.2 @@ -13267,14 +13502,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -13296,10 +13528,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: LoggingBetaOptions + stringValue: DynamicCABundle-request-header - key: net.host.name value: stringValue: 172.18.0.2 @@ -13321,9 +13553,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https @@ -13350,10 +13579,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: MatchLabelKeysInPodAffinity + stringValue: DynamicServingCertificateController - key: net.host.name value: stringValue: 172.18.0.2 @@ -13375,14 +13604,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -13404,10 +13630,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 
+ stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: MatchLabelKeysInPodTopologySpread + stringValue: bootstrap_signer_queue - key: net.host.name value: stringValue: 172.18.0.2 @@ -13429,9 +13655,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https @@ -13458,10 +13681,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: MaxUnavailableStatefulSet + stringValue: certificate - key: net.host.name value: stringValue: 172.18.0.2 @@ -13483,14 +13706,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -13512,10 +13732,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: MemoryManager + stringValue: claims - key: net.host.name value: stringValue: 172.18.0.2 @@ -13537,9 +13757,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https @@ -13566,10 +13783,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: MemoryQoS + stringValue: cronjob - key: net.host.name value: stringValue: 172.18.0.2 @@ -13591,9 +13808,6 @@ resourceMetrics: - key: 
service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -13620,10 +13834,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: MultiCIDRServiceAllocator + stringValue: daemonset - key: net.host.name value: stringValue: 172.18.0.2 @@ -13645,9 +13859,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -13674,10 +13885,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: MutatingAdmissionPolicy + stringValue: deployment - key: net.host.name value: stringValue: 172.18.0.2 @@ -13699,9 +13910,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -13728,10 +13936,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: NFTablesProxyMode + stringValue: disruption - key: net.host.name value: stringValue: 172.18.0.2 @@ -13753,14 +13961,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -13782,10 +13987,10 @@ resourceMetrics: stringValue: 
kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: NodeInclusionPolicyInPodTopologySpread + stringValue: disruption_recheck - key: net.host.name value: stringValue: 172.18.0.2 @@ -13807,9 +14012,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https @@ -13836,10 +14038,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: NodeLogQuery + stringValue: endpoint - key: net.host.name value: stringValue: 172.18.0.2 @@ -13861,14 +14063,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -13890,10 +14089,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: NodeSwap + stringValue: endpoint_slice - key: net.host.name value: stringValue: 172.18.0.2 @@ -13915,14 +14114,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -13944,10 +14140,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 
514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: OpenAPIEnums + stringValue: endpoint_slice_mirroring - key: net.host.name value: stringValue: 172.18.0.2 @@ -13969,14 +14165,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -13998,10 +14191,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: PDBUnhealthyPodEvictionPolicy + stringValue: ephemeral_volume - key: net.host.name value: stringValue: 172.18.0.2 @@ -14023,14 +14216,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -14052,10 +14242,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: PersistentVolumeLastPhaseTransitionTime + stringValue: garbage_collector_attempt_to_delete - key: net.host.name value: stringValue: 172.18.0.2 @@ -14077,9 +14267,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https @@ -14106,10 +14293,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: 
PodAndContainerStatsFromCRI + stringValue: garbage_collector_attempt_to_orphan - key: net.host.name value: stringValue: 172.18.0.2 @@ -14131,14 +14318,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -14160,10 +14344,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: PodDeletionCost + stringValue: garbage_collector_graph_changes - key: net.host.name value: stringValue: 172.18.0.2 @@ -14185,14 +14369,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -14214,10 +14395,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: PodDisruptionConditions + stringValue: horizontalpodautoscaler - key: net.host.name value: stringValue: 172.18.0.2 @@ -14239,14 +14420,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -14268,10 +14446,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - 
stringValue: PodIndexLabel + stringValue: job - key: net.host.name value: stringValue: 172.18.0.2 @@ -14293,14 +14471,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -14322,10 +14497,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: PodLifecycleSleepAction + stringValue: job_orphan_pod - key: net.host.name value: stringValue: 172.18.0.2 @@ -14347,14 +14522,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -14376,10 +14548,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: PodReadyToStartContainersCondition + stringValue: namespace - key: net.host.name value: stringValue: 172.18.0.2 @@ -14401,14 +14573,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -14430,10 +14599,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: PortForwardWebsockets + 
stringValue: node - key: net.host.name value: stringValue: 172.18.0.2 @@ -14455,9 +14624,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -14484,10 +14650,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ProcMountType + stringValue: node_lifecycle_controller - key: net.host.name value: stringValue: 172.18.0.2 @@ -14509,9 +14675,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -14538,10 +14701,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: QOSReserved + stringValue: node_lifecycle_controller_pods - key: net.host.name value: stringValue: 172.18.0.2 @@ -14563,9 +14726,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -14592,10 +14752,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: RecoverVolumeExpansionFailure + stringValue: noexec_taint_node - key: net.host.name value: stringValue: 172.18.0.2 @@ -14617,9 +14777,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -14646,10 +14803,10 @@ 
resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: RecursiveReadOnlyMounts + stringValue: noexec_taint_pod - key: net.host.name value: stringValue: 172.18.0.2 @@ -14671,9 +14828,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -14700,10 +14854,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: RelaxedEnvironmentVariableValidation + stringValue: orphaned_pods_nodes - key: net.host.name value: stringValue: 172.18.0.2 @@ -14725,9 +14879,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -14754,10 +14905,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: RetryGenerateName + stringValue: pvcprotection - key: net.host.name value: stringValue: 172.18.0.2 @@ -14779,14 +14930,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -14808,10 +14956,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 
- key: name value: - stringValue: RotateKubeletServerCertificate + stringValue: pvcs - key: net.host.name value: stringValue: 172.18.0.2 @@ -14833,9 +14981,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https @@ -14862,10 +15007,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: RuntimeClassInImageCriApi + stringValue: pvprotection - key: net.host.name value: stringValue: 172.18.0.2 @@ -14887,9 +15032,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -14916,10 +15058,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: SELinuxMount + stringValue: replicaset - key: net.host.name value: stringValue: 172.18.0.2 @@ -14941,14 +15083,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -14970,10 +15109,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: SELinuxMountReadWriteOncePod + stringValue: replicationmanager - key: net.host.name value: stringValue: 172.18.0.2 @@ -14995,9 +15134,6 @@ resourceMetrics: - key: service.name value: stringValue: 
kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https @@ -15024,10 +15160,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: SchedulerQueueingHints + stringValue: resource_quota_controller_resource_changes - key: net.host.name value: stringValue: 172.18.0.2 @@ -15049,14 +15185,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -15078,10 +15211,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: SeparateCacheWatchRPC + stringValue: resourcequota_primary - key: net.host.name value: stringValue: 172.18.0.2 @@ -15103,14 +15236,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -15132,10 +15262,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: SeparateTaintEvictionController + stringValue: resourcequota_priority - key: net.host.name value: stringValue: 172.18.0.2 @@ -15157,14 +15287,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: 
url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -15186,10 +15313,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ServiceAccountTokenJTI + stringValue: root_ca_cert_publisher - key: net.host.name value: stringValue: 172.18.0.2 @@ -15211,9 +15338,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https @@ -15240,10 +15364,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ServiceAccountTokenNodeBinding + stringValue: service - key: net.host.name value: stringValue: 172.18.0.2 @@ -15265,14 +15389,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -15294,10 +15415,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ServiceAccountTokenNodeBindingValidation + stringValue: serviceaccount - key: net.host.name value: stringValue: 172.18.0.2 @@ -15319,14 +15440,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 
attributes: - key: host.name value: @@ -15348,10 +15466,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ServiceAccountTokenPodNodeInfo + stringValue: serviceaccount_tokens_secret - key: net.host.name value: stringValue: 172.18.0.2 @@ -15373,9 +15491,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https @@ -15402,10 +15517,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ServiceTrafficDistribution + stringValue: serviceaccount_tokens_service - key: net.host.name value: stringValue: 172.18.0.2 @@ -15427,14 +15542,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -15456,10 +15568,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: SidecarContainers + stringValue: stale_pod_disruption - key: net.host.name value: stringValue: 172.18.0.2 @@ -15481,14 +15593,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -15510,10 +15619,10 @@ resourceMetrics: 
stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: SizeMemoryBackedVolumes + stringValue: statefulset - key: net.host.name value: stringValue: 172.18.0.2 @@ -15535,14 +15644,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -15564,10 +15670,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: SkipReadOnlyValidationGCE + stringValue: token_cleaner - key: net.host.name value: stringValue: 172.18.0.2 @@ -15589,14 +15695,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: DEPRECATED - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -15618,10 +15721,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: StatefulSetAutoDeletePVC + stringValue: ttl_jobs_to_delete - key: net.host.name value: stringValue: 172.18.0.2 @@ -15643,14 +15746,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -15672,10 +15772,10 @@ resourceMetrics: stringValue: 
kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: StatefulSetStartOrdinal + stringValue: ttlcontroller - key: net.host.name value: stringValue: 172.18.0.2 @@ -15697,14 +15797,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -15726,10 +15823,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: StorageNamespaceIndex + stringValue: validatingadmissionpolicy-status - key: net.host.name value: stringValue: 172.18.0.2 @@ -15751,9 +15848,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https @@ -15780,10 +15874,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: StorageVersionAPI + stringValue: volume_expand - key: net.host.name value: stringValue: 172.18.0.2 @@ -15805,14 +15899,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -15834,10 +15925,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: StorageVersionHash + stringValue: volumes - key: net.host.name value: stringValue: 172.18.0.2 @@ -15859,13 +15950,13 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" + name: workqueue_depth + - gauge: + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -15888,10 +15979,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: StorageVersionMigrator + stringValue: ClusterRoleAggregator - key: net.host.name value: stringValue: 172.18.0.2 @@ -15913,14 +16004,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -15942,10 +16030,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: StructuredAuthenticationConfiguration + stringValue: DynamicCABundle-client-ca-bundle - key: net.host.name value: stringValue: 172.18.0.2 @@ -15967,14 +16055,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -15996,10 +16081,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid 
value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: StructuredAuthorizationConfiguration + stringValue: DynamicCABundle-csr-controller - key: net.host.name value: stringValue: 172.18.0.2 @@ -16021,14 +16106,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -16050,10 +16132,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: TopologyAwareHints + stringValue: DynamicCABundle-request-header - key: net.host.name value: stringValue: 172.18.0.2 @@ -16075,9 +16157,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https @@ -16104,10 +16183,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: TopologyManagerPolicyAlphaOptions + stringValue: DynamicServingCertificateController - key: net.host.name value: stringValue: 172.18.0.2 @@ -16129,14 +16208,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -16158,10 +16234,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: TopologyManagerPolicyBetaOptions + stringValue: bootstrap_signer_queue - key: net.host.name value: stringValue: 172.18.0.2 @@ -16183,14 +16259,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -16212,10 +16285,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: TopologyManagerPolicyOptions + stringValue: certificate - key: net.host.name value: stringValue: 172.18.0.2 @@ -16237,14 +16310,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -16266,10 +16336,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: TranslateStreamCloseWebsocketRequests + stringValue: claims - key: net.host.name value: stringValue: 172.18.0.2 @@ -16291,14 +16361,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -16320,10 +16387,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: UnauthenticatedHTTP2DOSMitigation + stringValue: cronjob - key: net.host.name value: stringValue: 172.18.0.2 @@ -16345,10 +16412,7 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - - key: url.scheme + - key: url.scheme value: stringValue: https timeUnixNano: "1000000" @@ -16374,10 +16438,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: UnknownVersionInteroperabilityProxy + stringValue: daemonset - key: net.host.name value: stringValue: 172.18.0.2 @@ -16399,9 +16463,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -16428,10 +16489,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: UserNamespacesPodSecurityStandards + stringValue: deployment - key: net.host.name value: stringValue: 172.18.0.2 @@ -16453,9 +16514,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -16482,10 +16540,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: UserNamespacesSupport + stringValue: disruption - key: net.host.name value: stringValue: 172.18.0.2 @@ -16507,9 +16565,6 @@ 
resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https @@ -16536,10 +16591,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: VolumeAttributesClass + stringValue: disruption_recheck - key: net.host.name value: stringValue: 172.18.0.2 @@ -16561,9 +16616,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -16590,10 +16642,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: VolumeCapacityPriority + stringValue: endpoint - key: net.host.name value: stringValue: 172.18.0.2 @@ -16615,9 +16667,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -16644,10 +16693,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: WatchFromStorageWithoutResourceVersion + stringValue: endpoint_slice - key: net.host.name value: stringValue: 172.18.0.2 @@ -16669,9 +16718,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https @@ -16698,10 +16744,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - 
stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: WatchList + stringValue: endpoint_slice_mirroring - key: net.host.name value: stringValue: 172.18.0.2 @@ -16723,9 +16769,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https @@ -16752,10 +16795,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: WatchListClient + stringValue: ephemeral_volume - key: net.host.name value: stringValue: 172.18.0.2 @@ -16777,9 +16820,6 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https @@ -16806,10 +16846,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: WinDSR + stringValue: garbage_collector_attempt_to_delete - key: net.host.name value: stringValue: 172.18.0.2 @@ -16831,14 +16871,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -16860,10 +16897,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: WinOverlay + stringValue: garbage_collector_attempt_to_orphan - key: net.host.name 
value: stringValue: 172.18.0.2 @@ -16885,14 +16922,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -16914,10 +16948,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: WindowsHostNetwork + stringValue: garbage_collector_graph_changes - key: net.host.name value: stringValue: 172.18.0.2 @@ -16939,17 +16973,11 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stage - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - name: kubernetes_feature_enabled - - gauge: - dataPoints: - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -16971,7 +16999,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: horizontalpodautoscaler - key: net.host.name value: stringValue: 172.18.0.2 @@ -16997,11 +17028,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: up - - name: apiserver_audit_requests_rejected_total - sum: - aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -17024,7 +17050,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: job - key: net.host.name value: stringValue: 172.18.0.2 @@ -17050,10 +17079,7 @@ resourceMetrics: 
value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -17075,7 +17101,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: job_orphan_pod - key: net.host.name value: stringValue: 172.18.0.2 @@ -17101,11 +17130,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: node_collector_zone_size - - name: go_memstats_lookups_total - sum: - aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -17128,7 +17152,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: namespace - key: net.host.name value: stringValue: 172.18.0.2 @@ -17154,12 +17181,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: workqueue_adds_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 59 + - asDouble: 0 attributes: - key: host.name value: @@ -17181,10 +17203,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ClusterRoleAggregator + stringValue: node - key: net.host.name value: stringValue: 172.18.0.2 @@ -17210,7 +17232,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -17232,10 +17254,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DynamicCABundle-client-ca-bundle + stringValue: node_lifecycle_controller - key: net.host.name value: stringValue: 172.18.0.2 @@ -17261,7 +17283,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 4 + - asDouble: 0 attributes: - key: host.name value: @@ -17283,10 +17305,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DynamicCABundle-csr-controller + stringValue: node_lifecycle_controller_pods - key: net.host.name value: stringValue: 172.18.0.2 @@ -17312,7 +17334,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -17334,10 +17356,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DynamicCABundle-request-header + stringValue: noexec_taint_node - key: net.host.name value: stringValue: 172.18.0.2 @@ -17363,7 +17385,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 3 + - asDouble: 0 attributes: - key: host.name value: @@ -17385,10 +17407,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DynamicServingCertificateController + stringValue: noexec_taint_pod - key: net.host.name value: stringValue: 172.18.0.2 @@ -17414,7 +17436,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 2 + - 
asDouble: 0 attributes: - key: host.name value: @@ -17436,10 +17458,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: bootstrap_signer_queue + stringValue: orphaned_pods_nodes - key: net.host.name value: stringValue: 172.18.0.2 @@ -17465,7 +17487,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 30 + - asDouble: 0 attributes: - key: host.name value: @@ -17487,10 +17509,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: certificate + stringValue: pvcprotection - key: net.host.name value: stringValue: 172.18.0.2 @@ -17538,10 +17560,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: claims + stringValue: pvcs - key: net.host.name value: stringValue: 172.18.0.2 @@ -17589,10 +17611,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: cronjob + stringValue: pvprotection - key: net.host.name value: stringValue: 172.18.0.2 @@ -17618,7 +17640,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 18 + - asDouble: 0 attributes: - key: host.name value: @@ -17640,10 +17662,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 
514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: daemonset + stringValue: replicaset - key: net.host.name value: stringValue: 172.18.0.2 @@ -17669,7 +17691,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 42 + - asDouble: 0 attributes: - key: host.name value: @@ -17691,10 +17713,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: deployment + stringValue: replicationmanager - key: net.host.name value: stringValue: 172.18.0.2 @@ -17742,10 +17764,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: disruption + stringValue: resource_quota_controller_resource_changes - key: net.host.name value: stringValue: 172.18.0.2 @@ -17793,10 +17815,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: disruption_recheck + stringValue: resourcequota_primary - key: net.host.name value: stringValue: 172.18.0.2 @@ -17822,7 +17844,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 14 + - asDouble: 0 attributes: - key: host.name value: @@ -17844,10 +17866,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: endpoint + stringValue: resourcequota_priority - key: net.host.name value: stringValue: 172.18.0.2 @@ -17873,7 +17895,7 @@ resourceMetrics: 
value: stringValue: https timeUnixNano: "1000000" - - asDouble: 19 + - asDouble: 0 attributes: - key: host.name value: @@ -17895,10 +17917,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: endpoint_slice + stringValue: root_ca_cert_publisher - key: net.host.name value: stringValue: 172.18.0.2 @@ -17924,7 +17946,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 10 + - asDouble: 0 attributes: - key: host.name value: @@ -17946,10 +17968,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: endpoint_slice_mirroring + stringValue: service - key: net.host.name value: stringValue: 172.18.0.2 @@ -17997,10 +18019,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ephemeral_volume + stringValue: serviceaccount - key: net.host.name value: stringValue: 172.18.0.2 @@ -18026,7 +18048,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -18048,10 +18070,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: garbage_collector_attempt_to_delete + stringValue: serviceaccount_tokens_secret - key: net.host.name value: stringValue: 172.18.0.2 @@ -18099,10 +18121,10 @@ resourceMetrics: stringValue: 
kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: garbage_collector_attempt_to_orphan + stringValue: serviceaccount_tokens_service - key: net.host.name value: stringValue: 172.18.0.2 @@ -18128,7 +18150,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 637 + - asDouble: 0 attributes: - key: host.name value: @@ -18150,10 +18172,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: garbage_collector_graph_changes + stringValue: stale_pod_disruption - key: net.host.name value: stringValue: 172.18.0.2 @@ -18201,10 +18223,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: horizontalpodautoscaler + stringValue: statefulset - key: net.host.name value: stringValue: 172.18.0.2 @@ -18252,10 +18274,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: job + stringValue: token_cleaner - key: net.host.name value: stringValue: 172.18.0.2 @@ -18303,10 +18325,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: job_orphan_pod + stringValue: ttl_jobs_to_delete - key: net.host.name value: stringValue: 172.18.0.2 @@ -18354,10 +18376,10 @@ 
resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: namespace + stringValue: ttlcontroller - key: net.host.name value: stringValue: 172.18.0.2 @@ -18383,7 +18405,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -18405,10 +18427,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: node + stringValue: validatingadmissionpolicy-status - key: net.host.name value: stringValue: 172.18.0.2 @@ -18434,7 +18456,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 5 + - asDouble: 0 attributes: - key: host.name value: @@ -18456,10 +18478,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: node_lifecycle_controller + stringValue: volume_expand - key: net.host.name value: stringValue: 172.18.0.2 @@ -18485,7 +18507,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 13 + - asDouble: 0 attributes: - key: host.name value: @@ -18507,10 +18529,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: node_lifecycle_controller_pods + stringValue: volumes - key: net.host.name value: stringValue: 172.18.0.2 @@ -18536,7 +18558,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: 
"1000000" - - asDouble: 1 + name: workqueue_unfinished_work_seconds + - name: authorization_attempts_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 16 attributes: - key: host.name value: @@ -18558,10 +18585,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: noexec_taint_node + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -18571,6 +18595,9 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: result + value: + stringValue: allowed - key: server.address value: stringValue: 172.18.0.2 @@ -18587,7 +18614,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 22 + isMonotonic: true + - name: garbagecollector_controller_resources_sync_error_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 attributes: - key: host.name value: @@ -18609,10 +18641,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: noexec_taint_pod + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -18638,7 +18667,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 4096 attributes: - key: host.name value: @@ -18660,10 +18692,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: orphaned_pods_nodes + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -18689,7 +18718,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - 
asDouble: 0 + name: go_gc_stack_starting_size_bytes + - gauge: + dataPoints: + - asDouble: 3.104768e+06 attributes: - key: host.name value: @@ -18711,10 +18743,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: pvcprotection + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -18740,7 +18769,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + name: go_memstats_heap_released_bytes + - gauge: + dataPoints: + - asDouble: 1.337438208e+09 attributes: - key: host.name value: @@ -18762,10 +18794,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: pvcs + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -18791,7 +18820,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + name: process_virtual_memory_bytes + - gauge: + dataPoints: + - asDouble: 9.561912e+06 attributes: - key: host.name value: @@ -18813,10 +18845,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: pvprotection + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -18842,7 +18871,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 38 + name: go_memory_classes_heap_unused_bytes + - gauge: + dataPoints: + - asDouble: 714233 attributes: - key: host.name value: @@ -18864,10 +18896,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: replicaset + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -18893,7 +18922,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + name: go_memory_classes_other_bytes + - gauge: + dataPoints: + - asDouble: 121498 attributes: - key: host.name value: @@ -18915,10 +18947,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: replicationmanager + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -18944,7 +18973,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + name: go_memstats_heap_objects + - name: go_sync_mutex_wait_total_seconds_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 119.663328176 attributes: - key: host.name value: @@ -18966,10 +19000,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: resource_quota_controller_resource_changes + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -18995,6 +19026,9 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" + isMonotonic: true + - gauge: + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -19017,10 +19051,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: resourcequota_primary + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19046,8 +19077,14 @@ resourceMetrics: value: stringValue: 
https timeUnixNano: "1000000" - - asDouble: 0 + name: node_collector_unhealthy_nodes_in_zone + - gauge: + dataPoints: + - asDouble: 256 attributes: + - key: clusterCIDR + value: + stringValue: 10.244.0.0/16 - key: host.name value: stringValue: kind-control-plane @@ -19068,10 +19105,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: resourcequota_priority + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19097,7 +19131,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 6 + name: node_ipam_controller_cirdset_max_cidrs + - gauge: + dataPoints: + - asDouble: 18 attributes: - key: host.name value: @@ -19119,10 +19156,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: root_ca_cert_publisher + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19148,8 +19182,16 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + name: process_open_fds + - name: apiserver_delegated_authz_request_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 1 attributes: + - key: code + value: + stringValue: "201" - key: host.name value: stringValue: kind-control-plane @@ -19170,10 +19212,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: service + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19199,7 +19238,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 6 + isMonotonic: true + - 
name: go_gc_heap_frees_bytes_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 6.159704e+07 attributes: - key: host.name value: @@ -19221,10 +19265,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: serviceaccount + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19250,7 +19291,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 1.493567e+06 attributes: - key: host.name value: @@ -19272,10 +19316,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: serviceaccount_tokens_secret + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19301,7 +19342,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 48 + name: go_memory_classes_profiling_buckets_bytes + - name: go_memstats_lookups_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 attributes: - key: host.name value: @@ -19323,10 +19369,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: serviceaccount_tokens_service + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19352,8 +19395,16 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - name: node_ipam_controller_cidrset_cidrs_allocations_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 1 attributes: + - key: clusterCIDR + value: + stringValue: 
10.244.0.0/16 - key: host.name value: stringValue: kind-control-plane @@ -19374,10 +19425,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: stale_pod_disruption + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19403,7 +19451,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - name: endpoint_slice_controller_syncs + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 19 attributes: - key: host.name value: @@ -19425,10 +19478,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: statefulset + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19438,6 +19488,9 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: result + value: + stringValue: success - key: server.address value: stringValue: 172.18.0.2 @@ -19454,7 +19507,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + isMonotonic: true + - name: go_gc_heap_frees_objects_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 509807 attributes: - key: host.name value: @@ -19476,10 +19534,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: token_cleaner + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19505,7 +19560,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 2.4372352e+07 attributes: - 
key: host.name value: @@ -19527,10 +19585,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: ttl_jobs_to_delete + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19556,7 +19611,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 6 + name: go_gc_heap_live_bytes + - name: go_godebug_non_default_behavior_httpmuxgo121_events_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 attributes: - key: host.name value: @@ -19578,10 +19638,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: ttlcontroller + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19607,6 +19664,11 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" + isMonotonic: true + - name: go_godebug_non_default_behavior_randautoseed_events_total + sum: + aggregationTemporality: 2 + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -19629,10 +19691,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: validatingadmissionpolicy-status + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19658,7 +19717,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 6.291456e+06 attributes: - key: host.name value: @@ -19680,10 +19742,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: volume_expand + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19709,7 +19768,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + name: go_memstats_stack_inuse_bytes + - gauge: + dataPoints: + - asDouble: 1222 attributes: - key: host.name value: @@ -19731,10 +19793,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: volumes + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19760,12 +19819,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: authentication_token_cache_fetch_total - sum: - aggregationTemporality: 2 + name: go_sched_goroutines_goroutines + - gauge: dataPoints: - - asDouble: 1 + - asDouble: 2602 attributes: - key: host.name value: @@ -19787,7 +19844,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19809,19 +19866,16 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: status - value: - stringValue: ok - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_gc_heap_frees_bytes_total + name: scrape_samples_scraped + - name: authentication_attempts sum: aggregationTemporality: 2 dataPoints: - - asDouble: 6.3773216e+07 + - asDouble: 16 attributes: - key: host.name value: @@ -19843,7 +19897,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + 
stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19853,6 +19907,9 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: result + value: + stringValue: success - key: server.address value: stringValue: 172.18.0.2 @@ -19870,11 +19927,9 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - name: go_godebug_non_default_behavior_execerrdot_events_total - sum: - aggregationTemporality: 2 + - gauge: dataPoints: - - asDouble: 0 + - asDouble: 2602 attributes: - key: host.name value: @@ -19896,7 +19951,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19922,10 +19977,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: + name: scrape_samples_post_metric_relabeling + - name: go_cpu_classes_gc_pause_cpu_seconds_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 121220 + - asDouble: 0.009457184 attributes: - key: host.name value: @@ -19947,7 +20004,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -19973,8 +20030,8 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_heap_objects - - name: go_godebug_non_default_behavior_gotypesalias_events_total + isMonotonic: true + - name: go_godebug_non_default_behavior_jstmpllitinterp_events_total sum: aggregationTemporality: 2 dataPoints: @@ -20000,7 +20057,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + 
stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20027,11 +20084,11 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - name: go_memstats_alloc_bytes_total + - name: process_cpu_seconds_total sum: aggregationTemporality: 2 dataPoints: - - asDouble: 8.8843976e+07 + - asDouble: 2.74 attributes: - key: host.name value: @@ -20053,7 +20110,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20080,10 +20137,15 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - gauge: + - name: apiserver_delegated_authn_request_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 360448 + - asDouble: 1 attributes: + - key: code + value: + stringValue: "201" - key: host.name value: stringValue: kind-control-plane @@ -20104,7 +20166,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20130,12 +20192,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_heap_released_bytes - - name: go_gc_heap_tiny_allocs_objects_total - sum: - aggregationTemporality: 2 + isMonotonic: true + - gauge: dataPoints: - - asDouble: 51589 + - asDouble: 121498 attributes: - key: host.name value: @@ -20157,7 +20217,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20183,10 +20243,10 @@ resourceMetrics: value: 
stringValue: https timeUnixNano: "1000000" - isMonotonic: true + name: go_gc_heap_objects_objects - gauge: dataPoints: - - asDouble: 1 + - asDouble: 554400 attributes: - key: host.name value: @@ -20208,13 +20268,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: manager - value: - stringValue: kube-controller-manager - - key: name - value: - stringValue: nodeipam + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20240,10 +20294,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: running_managed_controllers - - gauge: + name: go_memory_classes_metadata_mspan_inuse_bytes + - name: node_collector_evictions_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 531680 + - asDouble: 0 attributes: - key: host.name value: @@ -20265,7 +20321,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20291,12 +20347,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_mspan_inuse_bytes - - name: registered_metrics_total - sum: - aggregationTemporality: 2 + isMonotonic: true + - gauge: dataPoints: - - asDouble: 141 + - asDouble: 2.077676e+07 attributes: - key: host.name value: @@ -20318,7 +20372,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20340,14 +20394,16 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stability_level - value: - stringValue: ALPHA - key: 
url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 4 + name: go_gc_scan_heap_bytes + - name: go_godebug_non_default_behavior_x509usepolicies_events_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 attributes: - key: host.name value: @@ -20369,7 +20425,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20391,14 +20447,16 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stability_level - value: - stringValue: BETA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 11 + isMonotonic: true + - name: service_controller_nodesync_error_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 attributes: - key: host.name value: @@ -20420,7 +20478,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20442,18 +20500,17 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stability_level - value: - stringValue: STABLE - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - - asDouble: 2 + isMonotonic: true + - name: cardinality_enforcement_unexpected_categorizations_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 attributes: - - key: deprecated_version - value: - stringValue: 1.30.0 - key: host.name value: stringValue: kind-control-plane @@ -20474,7 +20531,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 
514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20496,19 +20553,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: stability_level - value: - stringValue: ALPHA - key: url.scheme value: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - name: retroactive_storageclass_total - sum: - aggregationTemporality: 2 + - gauge: dataPoints: - - asDouble: 0 + - asDouble: 810272 attributes: - key: host.name value: @@ -20530,7 +20582,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20556,8 +20608,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: + name: go_gc_scan_globals_bytes + - name: go_godebug_non_default_behavior_gocacheverify_events_total + sum: + aggregationTemporality: 2 dataPoints: - asDouble: 0 attributes: @@ -20581,7 +20635,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20607,10 +20661,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: apiserver_envelope_encryption_dek_cache_fill_percent + isMonotonic: true - gauge: dataPoints: - - asDouble: 4096 + - asDouble: 6.291456e+06 attributes: - key: host.name value: @@ -20632,7 +20686,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20658,12 +20712,10 @@ resourceMetrics: value: stringValue: https 
timeUnixNano: "1000000" - name: go_gc_stack_starting_size_bytes - - name: go_godebug_non_default_behavior_httplaxcontentlength_events_total - sum: - aggregationTemporality: 2 + name: go_memstats_stack_sys_bytes + - gauge: dataPoints: - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -20685,7 +20737,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20711,10 +20763,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: + name: node_collector_zone_size + - name: taint_eviction_controller_pod_deletions_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 9.691136e+06 + - asDouble: 0 attributes: - key: host.name value: @@ -20736,7 +20790,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20762,10 +20816,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memory_classes_heap_free_bytes - - gauge: + isMonotonic: true + - name: apiserver_audit_requests_rejected_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 1.503928e+06 + - asDouble: 0 attributes: - key: host.name value: @@ -20787,7 +20843,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20813,10 +20869,8 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_gc_scan_stack_bytes - - name: 
go_godebug_non_default_behavior_jstmpllitinterp_events_total - sum: - aggregationTemporality: 2 + isMonotonic: true + - gauge: dataPoints: - asDouble: 0 attributes: @@ -20840,7 +20894,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20862,15 +20916,13 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: status + value: + stringValue: blocked - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_godebug_non_default_behavior_x509sha1_events_total - sum: - aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -20893,7 +20945,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20915,16 +20967,19 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager + - key: status + value: + stringValue: in_flight - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_memstats_frees_total + name: authentication_token_cache_active_fetch_count + - name: go_memstats_alloc_bytes_total sum: aggregationTemporality: 2 dataPoints: - - asDouble: 554041 + - asDouble: 8.669548e+07 attributes: - key: host.name value: @@ -20946,7 +21001,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -20973,15 +21028,12 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: 
true - - name: apiserver_delegated_authz_request_total + - name: hidden_metrics_total sum: aggregationTemporality: 2 dataPoints: - - asDouble: 1 + - asDouble: 0 attributes: - - key: code - value: - stringValue: "201" - key: host.name value: stringValue: kind-control-plane @@ -21002,7 +21054,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -21029,13 +21081,12 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - gauge: + - name: rest_client_transport_create_calls_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 0.00390625 + - asDouble: 54 attributes: - - key: clusterCIDR - value: - stringValue: 10.244.0.0/16 - key: host.name value: stringValue: kind-control-plane @@ -21056,7 +21107,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -21066,6 +21117,9 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: result + value: + stringValue: hit - key: server.address value: stringValue: 172.18.0.2 @@ -21082,12 +21136,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: node_ipam_controller_cidrset_usage_cidrs - - name: go_godebug_non_default_behavior_gocacheverify_events_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 0 + - asDouble: 3 attributes: - key: host.name value: @@ -21109,7 +21158,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 
@@ -21119,6 +21168,9 @@ resourceMetrics: - key: os.type value: stringValue: linux + - key: result + value: + stringValue: miss - key: server.address value: stringValue: 172.18.0.2 @@ -21136,11 +21188,11 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - name: go_godebug_non_default_behavior_http2client_events_total + - name: workqueue_adds_total sum: aggregationTemporality: 2 dataPoints: - - asDouble: 0 + - asDouble: 63 attributes: - key: host.name value: @@ -21162,7 +21214,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: ClusterRoleAggregator - key: net.host.name value: stringValue: 172.18.0.2 @@ -21188,10 +21243,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 4 + - asDouble: 1 attributes: - key: host.name value: @@ -21213,7 +21265,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: DynamicCABundle-client-ca-bundle - key: net.host.name value: stringValue: 172.18.0.2 @@ -21239,12 +21294,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_sched_gomaxprocs_threads - - name: go_gc_heap_allocs_bytes_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 8.8843976e+07 + - asDouble: 5 attributes: - key: host.name value: @@ -21266,7 +21316,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: DynamicCABundle-csr-controller - key: 
net.host.name value: stringValue: 172.18.0.2 @@ -21292,9 +21345,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - asDouble: 1 attributes: - key: host.name @@ -21317,7 +21367,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: DynamicCABundle-request-header - key: net.host.name value: stringValue: 172.18.0.2 @@ -21342,14 +21395,8 @@ resourceMetrics: - key: url.scheme value: stringValue: https - - key: version - value: - stringValue: go1.22.2 timeUnixNano: "1000000" - name: go_info - - gauge: - dataPoints: - - asDouble: 1 + - asDouble: 3 attributes: - key: host.name value: @@ -21371,10 +21418,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: kube-controller-manager + stringValue: DynamicServingCertificateController - key: net.host.name value: stringValue: 172.18.0.2 @@ -21400,10 +21447,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: leader_election_master_status - - gauge: - dataPoints: - - asDouble: 1.048576e+06 + - asDouble: 2 attributes: - key: host.name value: @@ -21425,7 +21469,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: bootstrap_signer_queue - key: net.host.name value: stringValue: 172.18.0.2 @@ -21451,12 +21498,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: process_max_fds - - name: rest_client_transport_create_calls_total - sum: - 
aggregationTemporality: 2 - dataPoints: - - asDouble: 54 + - asDouble: 30 attributes: - key: host.name value: @@ -21478,7 +21520,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: certificate - key: net.host.name value: stringValue: 172.18.0.2 @@ -21488,9 +21533,6 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: result - value: - stringValue: hit - key: server.address value: stringValue: 172.18.0.2 @@ -21507,7 +21549,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 3 + - asDouble: 0 attributes: - key: host.name value: @@ -21529,7 +21571,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: claims - key: net.host.name value: stringValue: 172.18.0.2 @@ -21539,9 +21584,6 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: result - value: - stringValue: miss - key: server.address value: stringValue: 172.18.0.2 @@ -21558,12 +21600,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_cpu_classes_gc_mark_assist_cpu_seconds_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 0.012906629 + - asDouble: 0 attributes: - key: host.name value: @@ -21585,7 +21622,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: cronjob - key: net.host.name value: stringValue: 172.18.0.2 @@ -21611,12 +21651,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - 
isMonotonic: true - - name: go_godebug_non_default_behavior_panicnil_events_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 0 + - asDouble: 17 attributes: - key: host.name value: @@ -21638,7 +21673,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: daemonset - key: net.host.name value: stringValue: 172.18.0.2 @@ -21664,10 +21702,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 1.504815e+06 + - asDouble: 38 attributes: - key: host.name value: @@ -21689,7 +21724,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: deployment - key: net.host.name value: stringValue: 172.18.0.2 @@ -21715,19 +21753,8 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_buck_hash_sys_bytes - - name: rest_client_requests_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 204 + - asDouble: 0 attributes: - - key: code - value: - stringValue: "200" - - key: host - value: - stringValue: 172.18.0.2:6443 - key: host.name value: stringValue: kind-control-plane @@ -21748,10 +21775,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: method + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name value: - stringValue: GET + stringValue: disruption - key: net.host.name value: stringValue: 172.18.0.2 @@ -21777,14 +21804,8 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 15 + - asDouble: 0 attributes: - - 
key: code - value: - stringValue: "200" - - key: host - value: - stringValue: 172.18.0.2:6443 - key: host.name value: stringValue: kind-control-plane @@ -21805,10 +21826,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: method + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name value: - stringValue: PATCH + stringValue: disruption_recheck - key: net.host.name value: stringValue: 172.18.0.2 @@ -21834,14 +21855,8 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 128 + - asDouble: 15 attributes: - - key: code - value: - stringValue: "200" - - key: host - value: - stringValue: 172.18.0.2:6443 - key: host.name value: stringValue: kind-control-plane @@ -21862,10 +21877,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: method + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name value: - stringValue: PUT + stringValue: endpoint - key: net.host.name value: stringValue: 172.18.0.2 @@ -21891,14 +21906,8 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 104 + - asDouble: 19 attributes: - - key: code - value: - stringValue: "201" - - key: host - value: - stringValue: 172.18.0.2:6443 - key: host.name value: stringValue: kind-control-plane @@ -21919,10 +21928,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: method + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name value: - stringValue: POST + stringValue: endpoint_slice - key: net.host.name value: stringValue: 172.18.0.2 @@ -21948,14 +21957,8 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 10 attributes: - - key: code - value: - 
stringValue: "403" - - key: host - value: - stringValue: 172.18.0.2:6443 - key: host.name value: stringValue: kind-control-plane @@ -21976,10 +21979,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: method + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name value: - stringValue: GET + stringValue: endpoint_slice_mirroring - key: net.host.name value: stringValue: 172.18.0.2 @@ -22005,14 +22008,8 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 35 + - asDouble: 0 attributes: - - key: code - value: - stringValue: "404" - - key: host - value: - stringValue: 172.18.0.2:6443 - key: host.name value: stringValue: kind-control-plane @@ -22033,10 +22030,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: method + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name value: - stringValue: GET + stringValue: ephemeral_volume - key: net.host.name value: stringValue: 172.18.0.2 @@ -22062,14 +22059,8 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 13 + - asDouble: 6 attributes: - - key: code - value: - stringValue: "409" - - key: host - value: - stringValue: 172.18.0.2:6443 - key: host.name value: stringValue: kind-control-plane @@ -22090,10 +22081,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: method + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name value: - stringValue: PUT + stringValue: garbage_collector_attempt_to_delete - key: net.host.name value: stringValue: 172.18.0.2 @@ -22119,11 +22110,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: 
go_godebug_non_default_behavior_randautoseed_events_total - sum: - aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -22146,7 +22132,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: garbage_collector_attempt_to_orphan - key: net.host.name value: stringValue: 172.18.0.2 @@ -22172,10 +22161,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 1.504815e+06 + - asDouble: 676 attributes: - key: host.name value: @@ -22197,7 +22183,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: garbage_collector_graph_changes - key: net.host.name value: stringValue: 172.18.0.2 @@ -22223,10 +22212,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memory_classes_profiling_buckets_bytes - - gauge: - dataPoints: - - asDouble: 2602 + - asDouble: 0 attributes: - key: host.name value: @@ -22248,7 +22234,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: horizontalpodautoscaler - key: net.host.name value: stringValue: 172.18.0.2 @@ -22274,10 +22263,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: scrape_samples_post_metric_relabeling - - gauge: - dataPoints: - - asDouble: 3 + - asDouble: 0 attributes: - key: host.name value: @@ -22299,7 +22285,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid 
value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: job - key: net.host.name value: stringValue: 172.18.0.2 @@ -22325,11 +22314,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: endpoint_slice_controller_desired_endpoint_slices - - name: go_cgo_go_to_c_calls_calls_total - sum: - aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -22352,7 +22336,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: job_orphan_pod - key: net.host.name value: stringValue: 172.18.0.2 @@ -22378,12 +22365,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: process_cpu_seconds_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 2.59 + - asDouble: 0 attributes: - key: host.name value: @@ -22405,7 +22387,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: namespace - key: net.host.name value: stringValue: 172.18.0.2 @@ -22431,12 +22416,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_gc_heap_frees_objects_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 502452 + - asDouble: 1 attributes: - key: host.name value: @@ -22458,7 +22438,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: node - key: net.host.name value: 
stringValue: 172.18.0.2 @@ -22484,16 +22467,8 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: node_ipam_controller_cidrset_cidrs_allocations_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 1 + - asDouble: 8 attributes: - - key: clusterCIDR - value: - stringValue: 10.244.0.0/16 - key: host.name value: stringValue: kind-control-plane @@ -22514,7 +22489,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: node_lifecycle_controller - key: net.host.name value: stringValue: 172.18.0.2 @@ -22540,10 +22518,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 1.17448704e+08 + - asDouble: 13 attributes: - key: host.name value: @@ -22565,7 +22540,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: node_lifecycle_controller_pods - key: net.host.name value: stringValue: 172.18.0.2 @@ -22591,10 +22569,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: process_resident_memory_bytes - - gauge: - dataPoints: - - asDouble: Infinity + - asDouble: 1 attributes: - key: host.name value: @@ -22616,7 +22591,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: noexec_taint_node - key: net.host.name value: stringValue: 172.18.0.2 @@ -22642,10 +22620,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: 
rest_client_exec_plugin_ttl_seconds - - gauge: - dataPoints: - - asDouble: 6.258688e+06 + - asDouble: 22 attributes: - key: host.name value: @@ -22667,7 +22642,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: noexec_taint_pod - key: net.host.name value: stringValue: 172.18.0.2 @@ -22693,10 +22671,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memory_classes_heap_stacks_bytes - - gauge: - dataPoints: - - asDouble: 4800 + - asDouble: 0 attributes: - key: host.name value: @@ -22718,7 +22693,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: orphaned_pods_nodes - key: net.host.name value: stringValue: 172.18.0.2 @@ -22744,10 +22722,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memory_classes_metadata_mcache_inuse_bytes - - gauge: - dataPoints: - - asDouble: 18 + - asDouble: 0 attributes: - key: host.name value: @@ -22769,7 +22744,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: pvcprotection - key: net.host.name value: stringValue: 172.18.0.2 @@ -22795,12 +22773,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: process_open_fds - - name: endpoint_slice_controller_syncs - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 19 + - asDouble: 0 attributes: - key: host.name value: @@ -22822,7 +22795,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: 
k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: pvcs - key: net.host.name value: stringValue: 172.18.0.2 @@ -22832,9 +22808,6 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: result - value: - stringValue: success - key: server.address value: stringValue: 172.18.0.2 @@ -22849,13 +22822,8 @@ resourceMetrics: stringValue: kube-controller-manager - key: url.scheme value: - stringValue: https - timeUnixNano: "1000000" - isMonotonic: true - - name: go_godebug_non_default_behavior_tls10server_events_total - sum: - aggregationTemporality: 2 - dataPoints: + stringValue: https + timeUnixNano: "1000000" - asDouble: 0 attributes: - key: host.name @@ -22878,7 +22846,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: pvprotection - key: net.host.name value: stringValue: 172.18.0.2 @@ -22904,10 +22875,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 2602 + - asDouble: 39 attributes: - key: host.name value: @@ -22929,7 +22897,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: replicaset - key: net.host.name value: stringValue: 172.18.0.2 @@ -22955,11 +22926,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: scrape_series_added - - name: go_godebug_non_default_behavior_tarinsecurepath_events_total - sum: - aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -22982,7 +22948,10 @@ resourceMetrics: stringValue: 
kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: replicationmanager - key: net.host.name value: stringValue: 172.18.0.2 @@ -23008,11 +22977,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_godebug_non_default_behavior_http2server_events_total - sum: - aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -23035,7 +22999,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: resource_quota_controller_resource_changes - key: net.host.name value: stringValue: 172.18.0.2 @@ -23061,11 +23028,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_godebug_non_default_behavior_installgoroot_events_total - sum: - aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -23088,7 +23050,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: resourcequota_primary - key: net.host.name value: stringValue: 172.18.0.2 @@ -23114,9 +23079,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -23139,7 +23101,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: resourcequota_priority - 
key: net.host.name value: stringValue: 172.18.0.2 @@ -23165,10 +23130,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memory_classes_os_stacks_bytes - - gauge: - dataPoints: - - asDouble: 1.0051584e+07 + - asDouble: 6 attributes: - key: host.name value: @@ -23190,7 +23152,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: root_ca_cert_publisher - key: net.host.name value: stringValue: 172.18.0.2 @@ -23216,10 +23181,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_heap_idle_bytes - - gauge: - dataPoints: - - asDouble: 636480 + - asDouble: 0 attributes: - key: host.name value: @@ -23241,7 +23203,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: service - key: net.host.name value: stringValue: 172.18.0.2 @@ -23267,10 +23232,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_mspan_sys_bytes - - gauge: - dataPoints: - - asDouble: 1221 + - asDouble: 6 attributes: - key: host.name value: @@ -23292,7 +23254,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: serviceaccount - key: net.host.name value: stringValue: 172.18.0.2 @@ -23318,12 +23283,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_sched_goroutines_goroutines - - name: go_gc_heap_allocs_objects_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 623672 + - asDouble: 0 
attributes: - key: host.name value: @@ -23345,7 +23305,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: serviceaccount_tokens_secret - key: net.host.name value: stringValue: 172.18.0.2 @@ -23371,10 +23334,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 1.009921e+06 + - asDouble: 48 attributes: - key: host.name value: @@ -23396,7 +23356,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: serviceaccount_tokens_service - key: net.host.name value: stringValue: 172.18.0.2 @@ -23422,10 +23385,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memory_classes_other_bytes - - gauge: - dataPoints: - - asDouble: 5.7627912e+07 + - asDouble: 0 attributes: - key: host.name value: @@ -23447,7 +23407,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: stale_pod_disruption - key: net.host.name value: stringValue: 172.18.0.2 @@ -23473,10 +23436,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memory_classes_total_bytes - - gauge: - dataPoints: - - asDouble: 531680 + - asDouble: 0 attributes: - key: host.name value: @@ -23498,7 +23458,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + 
value: + stringValue: statefulset - key: net.host.name value: stringValue: 172.18.0.2 @@ -23524,10 +23487,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memory_classes_metadata_mspan_inuse_bytes - - gauge: - dataPoints: - - asDouble: 1.8446744073709552e+19 + - asDouble: 1 attributes: - key: host.name value: @@ -23549,7 +23509,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: token_cleaner - key: net.host.name value: stringValue: 172.18.0.2 @@ -23575,11 +23538,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: process_virtual_memory_max_bytes - - name: garbagecollector_controller_resources_sync_error_total - sum: - aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -23602,7 +23560,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: ttl_jobs_to_delete - key: net.host.name value: stringValue: 172.18.0.2 @@ -23628,10 +23589,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 4.407296e+07 + - asDouble: 8 attributes: - key: host.name value: @@ -23653,7 +23611,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: ttlcontroller - key: net.host.name value: stringValue: 172.18.0.2 @@ -23679,11 +23640,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_heap_sys_bytes - - name: 
go_gc_cycles_forced_gc_cycles_total - sum: - aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -23706,7 +23662,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: validatingadmissionpolicy-status - key: net.host.name value: stringValue: 172.18.0.2 @@ -23732,11 +23691,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: apiserver_webhooks_x509_missing_san_total - sum: - aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -23759,7 +23713,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: volume_expand - key: net.host.name value: stringValue: 172.18.0.2 @@ -23785,9 +23742,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -23810,7 +23764,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: volumes - key: net.host.name value: stringValue: 172.18.0.2 @@ -23832,13 +23789,15 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: status - value: - stringValue: blocked - key: url.scheme value: stringValue: https timeUnixNano: "1000000" + isMonotonic: true + - name: apiserver_storage_envelope_transformation_cache_misses_total + sum: + aggregationTemporality: 2 + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -23861,7 +23820,7 
@@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -23883,19 +23842,14 @@ resourceMetrics: - key: service.name value: stringValue: kube-controller-manager - - key: status - value: - stringValue: in_flight - key: url.scheme value: stringValue: https timeUnixNano: "1000000" - name: authentication_token_cache_active_fetch_count - - name: cardinality_enforcement_unexpected_categorizations_total - sum: - aggregationTemporality: 2 + isMonotonic: true + - gauge: dataPoints: - - asDouble: 0 + - asDouble: 1.8446744073709552e+19 attributes: - key: host.name value: @@ -23917,7 +23871,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -23943,10 +23897,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: + name: process_virtual_memory_max_bytes + - name: go_godebug_non_default_behavior_zipinsecurepath_events_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 9.223372036854776e+18 + - asDouble: 0 attributes: - key: host.name value: @@ -23968,7 +23924,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -23994,12 +23950,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_gc_gomemlimit_bytes - - name: go_godebug_non_default_behavior_multipartmaxparts_events_total - sum: - aggregationTemporality: 2 + isMonotonic: true + - gauge: dataPoints: - - asDouble: 0 + - 
asDouble: 6.1560072e+07 attributes: - key: host.name value: @@ -24021,7 +23975,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24047,10 +24001,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true + name: go_memory_classes_total_bytes - gauge: dataPoints: - - asDouble: 4.129448e+06 + - asDouble: 100 attributes: - key: host.name value: @@ -24072,7 +24026,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24098,12 +24052,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_gc_sys_bytes - - name: go_memstats_mallocs_total - sum: - aggregationTemporality: 2 + name: go_gc_gogc_percent + - gauge: dataPoints: - - asDouble: 675261 + - asDouble: 3 attributes: - key: host.name value: @@ -24125,7 +24077,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24151,12 +24103,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_cpu_classes_scavenge_assist_cpu_seconds_total - sum: - aggregationTemporality: 2 + name: endpoint_slice_controller_num_endpoint_slices + - gauge: dataPoints: - - asDouble: 1.7e-07 + - asDouble: 1221 attributes: - key: host.name value: @@ -24178,7 +24128,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24204,12 +24154,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_gc_duration_seconds_count - sum: - aggregationTemporality: 2 + name: go_goroutines + - gauge: dataPoints: - - asInt: "13" + - asDouble: 2.509844e+07 attributes: - key: host.name value: @@ -24231,7 +24179,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24257,12 +24205,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_gc_duration_seconds_sum + name: go_memstats_alloc_bytes + - name: retroactive_storageclass_total sum: aggregationTemporality: 2 dataPoints: - - asDouble: 0.00203574 + - asDouble: 0 attributes: - key: host.name value: @@ -24284,7 +24232,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24311,9 +24259,11 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - gauge: + - name: authenticated_user_requests + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 3.0207e-05 + - asDouble: 16 attributes: - key: host.name value: @@ -24335,7 +24285,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24345,9 +24295,6 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: 
quantile - value: - stringValue: "0" - key: server.address value: stringValue: 172.18.0.2 @@ -24363,8 +24310,16 @@ resourceMetrics: - key: url.scheme value: stringValue: https + - key: username + value: + stringValue: other timeUnixNano: "1000000" - - asDouble: 8.9408e-05 + isMonotonic: true + - name: go_cpu_classes_user_cpu_seconds_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 2.544796341 attributes: - key: host.name value: @@ -24386,7 +24341,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24396,9 +24351,6 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: quantile - value: - stringValue: "0.25" - key: server.address value: stringValue: 172.18.0.2 @@ -24415,7 +24367,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0.000118682 + isMonotonic: true + - name: go_godebug_non_default_behavior_multipathtcp_events_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 attributes: - key: host.name value: @@ -24437,7 +24394,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24447,9 +24404,6 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: quantile - value: - stringValue: "0.5" - key: server.address value: stringValue: 172.18.0.2 @@ -24466,7 +24420,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0.000195958 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 5.1061512e+07 attributes: - key: host.name value: @@ -24488,7 +24445,7 @@ resourceMetrics: stringValue: 
kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24498,9 +24455,6 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: quantile - value: - stringValue: "0.75" - key: server.address value: stringValue: 172.18.0.2 @@ -24517,7 +24471,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0.000556802 + name: go_memstats_next_gc_bytes + - name: go_cgo_go_to_c_calls_calls_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 0 attributes: - key: host.name value: @@ -24539,7 +24498,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24549,9 +24508,6 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: quantile - value: - stringValue: "1" - key: server.address value: stringValue: 172.18.0.2 @@ -24568,12 +24524,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_gc_duration_seconds_quantile - - name: go_godebug_non_default_behavior_x509usepolicies_events_total - sum: - aggregationTemporality: 2 + isMonotonic: true + - gauge: dataPoints: - - asDouble: 0 + - asDouble: 4 attributes: - key: host.name value: @@ -24595,7 +24549,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24621,12 +24575,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_cpu_classes_user_cpu_seconds_total + name: 
endpoint_slice_controller_endpoints_desired + - name: go_cpu_classes_gc_mark_dedicated_cpu_seconds_total sum: aggregationTemporality: 2 dataPoints: - - asDouble: 2.735218013 + - asDouble: 0.069203205 attributes: - key: host.name value: @@ -24648,7 +24602,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24675,9 +24629,11 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - gauge: + - name: go_cpu_classes_total_cpu_seconds_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 2.507076e+07 + - asDouble: 555.996617088 attributes: - key: host.name value: @@ -24699,7 +24655,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24725,12 +24681,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_alloc_bytes - - name: go_godebug_non_default_behavior_tlsunsafeekm_events_total - sum: - aggregationTemporality: 2 + isMonotonic: true + - gauge: dataPoints: - - asDouble: 0 + - asDouble: 4.12528e+06 attributes: - key: host.name value: @@ -24752,7 +24706,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24778,13 +24732,14 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: authorization_attempts_total - sum: - aggregationTemporality: 2 + name: go_memstats_gc_sys_bytes + - gauge: dataPoints: - - asDouble: 14 + - asDouble: 
0.00390625 attributes: + - key: clusterCIDR + value: + stringValue: 10.244.0.0/16 - key: host.name value: stringValue: kind-control-plane @@ -24805,7 +24760,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24815,9 +24770,6 @@ resourceMetrics: - key: os.type value: stringValue: linux - - key: result - value: - stringValue: allowed - key: server.address value: stringValue: 172.18.0.2 @@ -24834,10 +24786,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: + name: node_ipam_controller_cidrset_usage_cidrs + - name: ephemeral_volume_controller_create_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 3 + - asDouble: 0 attributes: - key: host.name value: @@ -24859,7 +24813,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24885,10 +24839,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: endpoint_slice_controller_num_endpoint_slices - - gauge: + isMonotonic: true + - name: go_cpu_classes_gc_mark_assist_cpu_seconds_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 10800 + - asDouble: 0.003772846 attributes: - key: host.name value: @@ -24910,7 +24866,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24936,10 +24892,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: 
go_memory_classes_metadata_mcache_free_bytes - - gauge: + isMonotonic: true + - name: go_cpu_classes_gc_mark_idle_cpu_seconds_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 15600 + - asDouble: 0.138028727 attributes: - key: host.name value: @@ -24961,7 +24919,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -24987,10 +24945,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memstats_mcache_sys_bytes + isMonotonic: true - gauge: dataPoints: - - asDouble: 3 + - asDouble: 4800 attributes: - key: host.name value: @@ -25012,7 +24970,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -25038,8 +24996,8 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: rest_client_transport_cache_entries - - name: go_godebug_non_default_behavior_x509usefallbackroots_events_total + name: go_memory_classes_metadata_mcache_inuse_bytes + - name: apiserver_storage_data_key_generation_failures_total sum: aggregationTemporality: 2 dataPoints: @@ -25065,7 +25023,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -25092,9 +25050,11 @@ resourceMetrics: stringValue: https timeUnixNano: "1000000" isMonotonic: true - - gauge: + - name: ephemeral_volume_controller_create_failures_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 360448 + - asDouble: 0 attributes: - key: host.name 
value: @@ -25116,7 +25076,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -25142,10 +25102,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memory_classes_heap_released_bytes - - gauge: + isMonotonic: true + - name: go_cpu_classes_gc_total_cpu_seconds_total + sum: + aggregationTemporality: 2 dataPoints: - - asDouble: 4.129448e+06 + - asDouble: 0.220461962 attributes: - key: host.name value: @@ -25167,7 +25129,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -25193,12 +25155,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_memory_classes_metadata_other_bytes - - name: endpoint_slice_controller_changes + isMonotonic: true + - name: go_godebug_non_default_behavior_installgoroot_events_total sum: aggregationTemporality: 2 dataPoints: - - asDouble: 3 + - asDouble: 0 attributes: - key: host.name value: @@ -25220,16 +25182,13 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 - key: net.host.port value: stringValue: "10257" - - key: operation - value: - stringValue: create - key: os.type value: stringValue: linux @@ -25249,7 +25208,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 6 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 1.0469376e+07 attributes: - key: host.name value: @@ -25271,16 +25233,13 @@ resourceMetrics: 
stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 - key: net.host.port value: stringValue: "10257" - - key: operation - value: - stringValue: update - key: os.type value: stringValue: linux @@ -25300,12 +25259,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_cpu_classes_gc_mark_dedicated_cpu_seconds_total - sum: - aggregationTemporality: 2 + name: go_memory_classes_heap_free_bytes + - gauge: dataPoints: - - asDouble: 0.064445585 + - asDouble: 0 attributes: - key: host.name value: @@ -25327,7 +25284,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: ClusterRoleAggregator - key: net.host.name value: stringValue: 172.18.0.2 @@ -25353,10 +25313,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 121220 + - asDouble: 0 attributes: - key: host.name value: @@ -25378,7 +25335,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: DynamicCABundle-client-ca-bundle - key: net.host.name value: stringValue: 172.18.0.2 @@ -25404,16 +25364,8 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_gc_heap_objects_objects - - name: apiserver_delegated_authn_request_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 1 + - asDouble: 0 attributes: - - key: code - value: - stringValue: "201" - key: host.name value: stringValue: 
kind-control-plane @@ -25434,7 +25386,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: DynamicCABundle-csr-controller - key: net.host.name value: stringValue: 172.18.0.2 @@ -25460,12 +25415,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_cpu_classes_gc_pause_cpu_seconds_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 0.008165628 + - asDouble: 0 attributes: - key: host.name value: @@ -25487,7 +25437,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: DynamicCABundle-request-header - key: net.host.name value: stringValue: 172.18.0.2 @@ -25513,9 +25466,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -25538,10 +25488,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ClusterRoleAggregator + stringValue: DynamicServingCertificateController - key: net.host.name value: stringValue: 172.18.0.2 @@ -25589,10 +25539,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DynamicCABundle-client-ca-bundle + stringValue: bootstrap_signer_queue - key: net.host.name value: stringValue: 172.18.0.2 @@ -25640,10 +25590,10 @@ 
resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DynamicCABundle-csr-controller + stringValue: certificate - key: net.host.name value: stringValue: 172.18.0.2 @@ -25691,10 +25641,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DynamicCABundle-request-header + stringValue: claims - key: net.host.name value: stringValue: 172.18.0.2 @@ -25742,10 +25692,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: DynamicServingCertificateController + stringValue: cronjob - key: net.host.name value: stringValue: 172.18.0.2 @@ -25793,10 +25743,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: bootstrap_signer_queue + stringValue: daemonset - key: net.host.name value: stringValue: 172.18.0.2 @@ -25844,10 +25794,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: certificate + stringValue: deployment - key: net.host.name value: stringValue: 172.18.0.2 @@ -25895,10 +25845,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 
514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: claims + stringValue: disruption - key: net.host.name value: stringValue: 172.18.0.2 @@ -25946,10 +25896,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: cronjob + stringValue: disruption_recheck - key: net.host.name value: stringValue: 172.18.0.2 @@ -25997,10 +25947,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: daemonset + stringValue: endpoint - key: net.host.name value: stringValue: 172.18.0.2 @@ -26048,10 +25998,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: deployment + stringValue: endpoint_slice - key: net.host.name value: stringValue: 172.18.0.2 @@ -26099,10 +26049,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: disruption + stringValue: endpoint_slice_mirroring - key: net.host.name value: stringValue: 172.18.0.2 @@ -26150,10 +26100,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: disruption_recheck + stringValue: ephemeral_volume - key: net.host.name value: stringValue: 172.18.0.2 @@ -26201,10 +26151,10 @@ resourceMetrics: 
stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: endpoint + stringValue: garbage_collector_attempt_to_delete - key: net.host.name value: stringValue: 172.18.0.2 @@ -26252,10 +26202,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: endpoint_slice + stringValue: garbage_collector_attempt_to_orphan - key: net.host.name value: stringValue: 172.18.0.2 @@ -26303,10 +26253,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: endpoint_slice_mirroring + stringValue: garbage_collector_graph_changes - key: net.host.name value: stringValue: 172.18.0.2 @@ -26354,10 +26304,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ephemeral_volume + stringValue: horizontalpodautoscaler - key: net.host.name value: stringValue: 172.18.0.2 @@ -26405,10 +26355,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: garbage_collector_attempt_to_delete + stringValue: job - key: net.host.name value: stringValue: 172.18.0.2 @@ -26456,10 +26406,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: garbage_collector_attempt_to_orphan + stringValue: job_orphan_pod - key: net.host.name value: stringValue: 172.18.0.2 @@ -26507,10 +26457,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: garbage_collector_graph_changes + stringValue: namespace - key: net.host.name value: stringValue: 172.18.0.2 @@ -26558,10 +26508,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: horizontalpodautoscaler + stringValue: node - key: net.host.name value: stringValue: 172.18.0.2 @@ -26609,10 +26559,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: job + stringValue: node_lifecycle_controller - key: net.host.name value: stringValue: 172.18.0.2 @@ -26660,10 +26610,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: job_orphan_pod + stringValue: node_lifecycle_controller_pods - key: net.host.name value: stringValue: 172.18.0.2 @@ -26711,10 +26661,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: namespace + stringValue: 
noexec_taint_node - key: net.host.name value: stringValue: 172.18.0.2 @@ -26740,7 +26690,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 1 + - asDouble: 0 attributes: - key: host.name value: @@ -26762,10 +26712,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: node + stringValue: noexec_taint_pod - key: net.host.name value: stringValue: 172.18.0.2 @@ -26813,10 +26763,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: node_lifecycle_controller + stringValue: orphaned_pods_nodes - key: net.host.name value: stringValue: 172.18.0.2 @@ -26864,10 +26814,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: node_lifecycle_controller_pods + stringValue: pvcprotection - key: net.host.name value: stringValue: 172.18.0.2 @@ -26915,10 +26865,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: noexec_taint_node + stringValue: pvcs - key: net.host.name value: stringValue: 172.18.0.2 @@ -26966,10 +26916,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: noexec_taint_pod + stringValue: pvprotection - key: 
net.host.name value: stringValue: 172.18.0.2 @@ -27017,10 +26967,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: orphaned_pods_nodes + stringValue: replicaset - key: net.host.name value: stringValue: 172.18.0.2 @@ -27068,10 +27018,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: pvcprotection + stringValue: replicationmanager - key: net.host.name value: stringValue: 172.18.0.2 @@ -27119,10 +27069,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: pvcs + stringValue: resource_quota_controller_resource_changes - key: net.host.name value: stringValue: 172.18.0.2 @@ -27170,10 +27120,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: pvprotection + stringValue: resourcequota_primary - key: net.host.name value: stringValue: 172.18.0.2 @@ -27221,10 +27171,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: replicaset + stringValue: resourcequota_priority - key: net.host.name value: stringValue: 172.18.0.2 @@ -27272,10 +27222,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid 
value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: replicationmanager + stringValue: root_ca_cert_publisher - key: net.host.name value: stringValue: 172.18.0.2 @@ -27323,10 +27273,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: resource_quota_controller_resource_changes + stringValue: service - key: net.host.name value: stringValue: 172.18.0.2 @@ -27374,10 +27324,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: resourcequota_primary + stringValue: serviceaccount - key: net.host.name value: stringValue: 172.18.0.2 @@ -27425,10 +27375,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: resourcequota_priority + stringValue: serviceaccount_tokens_secret - key: net.host.name value: stringValue: 172.18.0.2 @@ -27476,10 +27426,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: root_ca_cert_publisher + stringValue: serviceaccount_tokens_service - key: net.host.name value: stringValue: 172.18.0.2 @@ -27527,10 +27477,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - 
key: name value: - stringValue: service + stringValue: stale_pod_disruption - key: net.host.name value: stringValue: 172.18.0.2 @@ -27578,10 +27528,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: serviceaccount + stringValue: statefulset - key: net.host.name value: stringValue: 172.18.0.2 @@ -27629,10 +27579,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: serviceaccount_tokens_secret + stringValue: token_cleaner - key: net.host.name value: stringValue: 172.18.0.2 @@ -27680,10 +27630,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: serviceaccount_tokens_service + stringValue: ttl_jobs_to_delete - key: net.host.name value: stringValue: 172.18.0.2 @@ -27731,10 +27681,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: stale_pod_disruption + stringValue: ttlcontroller - key: net.host.name value: stringValue: 172.18.0.2 @@ -27782,10 +27732,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: statefulset + stringValue: validatingadmissionpolicy-status - key: net.host.name value: stringValue: 172.18.0.2 @@ -27833,10 +27783,10 
@@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: token_cleaner + stringValue: volume_expand - key: net.host.name value: stringValue: 172.18.0.2 @@ -27884,10 +27834,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ttl_jobs_to_delete + stringValue: volumes - key: net.host.name value: stringValue: 172.18.0.2 @@ -27913,8 +27863,19 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + name: workqueue_longest_running_processor_seconds + - name: rest_client_requests_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 216 attributes: + - key: code + value: + stringValue: "200" + - key: host + value: + stringValue: 172.18.0.2:6443 - key: host.name value: stringValue: kind-control-plane @@ -27935,10 +27896,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: method value: - stringValue: ttlcontroller + stringValue: GET - key: net.host.name value: stringValue: 172.18.0.2 @@ -27964,8 +27925,14 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 15 attributes: + - key: code + value: + stringValue: "200" + - key: host + value: + stringValue: 172.18.0.2:6443 - key: host.name value: stringValue: kind-control-plane @@ -27986,10 +27953,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name + stringValue: 
514ab4db-aa19-4655-8675-a99861a6a362 + - key: method value: - stringValue: validatingadmissionpolicy-status + stringValue: PATCH - key: net.host.name value: stringValue: 172.18.0.2 @@ -28015,8 +27982,14 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 140 attributes: + - key: code + value: + stringValue: "200" + - key: host + value: + stringValue: 172.18.0.2:6443 - key: host.name value: stringValue: kind-control-plane @@ -28037,10 +28010,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: method value: - stringValue: volume_expand + stringValue: PUT - key: net.host.name value: stringValue: 172.18.0.2 @@ -28066,8 +28039,14 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 104 attributes: + - key: code + value: + stringValue: "201" + - key: host + value: + stringValue: 172.18.0.2:6443 - key: host.name value: stringValue: kind-control-plane @@ -28088,10 +28067,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: method value: - stringValue: volumes + stringValue: POST - key: net.host.name value: stringValue: 172.18.0.2 @@ -28117,11 +28096,14 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: workqueue_depth - - gauge: - dataPoints: - - asDouble: 0 + - asDouble: 1 attributes: + - key: code + value: + stringValue: "403" + - key: host + value: + stringValue: 172.18.0.2:6443 - key: host.name value: stringValue: kind-control-plane @@ -28142,10 +28124,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: method value: - stringValue: ClusterRoleAggregator + stringValue: GET - key: net.host.name value: stringValue: 172.18.0.2 @@ -28171,8 +28153,14 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 35 attributes: + - key: code + value: + stringValue: "404" + - key: host + value: + stringValue: 172.18.0.2:6443 - key: host.name value: stringValue: kind-control-plane @@ -28193,10 +28181,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: method value: - stringValue: DynamicCABundle-client-ca-bundle + stringValue: GET - key: net.host.name value: stringValue: 172.18.0.2 @@ -28222,8 +28210,14 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 9 attributes: + - key: code + value: + stringValue: "409" + - key: host + value: + stringValue: 172.18.0.2:6443 - key: host.name value: stringValue: kind-control-plane @@ -28244,10 +28238,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: method value: - stringValue: DynamicCABundle-csr-controller + stringValue: PUT - key: net.host.name value: stringValue: 172.18.0.2 @@ -28273,7 +28267,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 2602 attributes: - key: host.name value: @@ -28295,10 +28292,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - 
stringValue: DynamicCABundle-request-header + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -28324,7 +28318,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + name: scrape_series_added + - gauge: + dataPoints: + - asDouble: 1.3574144e+07 attributes: - key: host.name value: @@ -28346,10 +28343,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: DynamicServingCertificateController + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -28375,6 +28369,11 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" + name: go_memstats_heap_idle_bytes + - name: go_godebug_non_default_behavior_gocachehash_events_total + sum: + aggregationTemporality: 2 + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -28397,10 +28396,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: bootstrap_signer_queue + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -28426,7 +28422,12 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - name: go_memstats_mallocs_total + sum: + aggregationTemporality: 2 + dataPoints: + - asDouble: 684042 attributes: - key: host.name value: @@ -28448,10 +28449,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: certificate + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -28477,7 +28475,10 @@ resourceMetrics: 
value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 4 attributes: - key: host.name value: @@ -28499,10 +28500,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: claims + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -28528,7 +28526,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + name: go_sched_gomaxprocs_threads + - gauge: + dataPoints: + - asDouble: 1 attributes: - key: host.name value: @@ -28550,10 +28551,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: cronjob + stringValue: kube-controller-manager - key: net.host.name value: stringValue: 172.18.0.2 @@ -28579,6 +28580,11 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" + name: leader_election_master_status + - name: retroactive_storageclass_errors_total + sum: + aggregationTemporality: 2 + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -28601,10 +28607,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: daemonset + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -28630,7 +28633,10 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + isMonotonic: true + - gauge: + dataPoints: + - asDouble: 3 attributes: - key: host.name value: @@ -28652,10 +28658,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - 
stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 - - key: name - value: - stringValue: deployment + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: net.host.name value: stringValue: 172.18.0.2 @@ -28681,6 +28684,11 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" + name: endpoint_slice_controller_desired_endpoint_slices + - name: workqueue_retries_total + sum: + aggregationTemporality: 2 + dataPoints: - asDouble: 0 attributes: - key: host.name @@ -28703,10 +28711,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: disruption + stringValue: ClusterRoleAggregator - key: net.host.name value: stringValue: 172.18.0.2 @@ -28754,10 +28762,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: disruption_recheck + stringValue: DynamicCABundle-client-ca-bundle - key: net.host.name value: stringValue: 172.18.0.2 @@ -28805,10 +28813,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: endpoint + stringValue: DynamicCABundle-csr-controller - key: net.host.name value: stringValue: 172.18.0.2 @@ -28856,10 +28864,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: endpoint_slice + stringValue: DynamicCABundle-request-header - key: net.host.name value: stringValue: 172.18.0.2 @@ -28907,10 +28915,10 @@ 
resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: endpoint_slice_mirroring + stringValue: DynamicServingCertificateController - key: net.host.name value: stringValue: 172.18.0.2 @@ -28958,10 +28966,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ephemeral_volume + stringValue: bootstrap_signer_queue - key: net.host.name value: stringValue: 172.18.0.2 @@ -29009,10 +29017,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: garbage_collector_attempt_to_delete + stringValue: certificate - key: net.host.name value: stringValue: 172.18.0.2 @@ -29060,10 +29068,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: garbage_collector_attempt_to_orphan + stringValue: cronjob - key: net.host.name value: stringValue: 172.18.0.2 @@ -29111,10 +29119,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: garbage_collector_graph_changes + stringValue: daemonset - key: net.host.name value: stringValue: 172.18.0.2 @@ -29140,7 +29148,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 17 attributes: - 
key: host.name value: @@ -29162,10 +29170,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: horizontalpodautoscaler + stringValue: deployment - key: net.host.name value: stringValue: 172.18.0.2 @@ -29213,10 +29221,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: job + stringValue: disruption - key: net.host.name value: stringValue: 172.18.0.2 @@ -29264,10 +29272,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: job_orphan_pod + stringValue: disruption_recheck - key: net.host.name value: stringValue: 172.18.0.2 @@ -29293,7 +29301,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 10 attributes: - key: host.name value: @@ -29315,10 +29323,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: namespace + stringValue: endpoint - key: net.host.name value: stringValue: 172.18.0.2 @@ -29344,7 +29352,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - - asDouble: 0 + - asDouble: 16 attributes: - key: host.name value: @@ -29366,10 +29374,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - 
key: name value: - stringValue: node + stringValue: endpoint_slice - key: net.host.name value: stringValue: 172.18.0.2 @@ -29417,10 +29425,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: node_lifecycle_controller + stringValue: endpoint_slice_mirroring - key: net.host.name value: stringValue: 172.18.0.2 @@ -29468,10 +29476,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: node_lifecycle_controller_pods + stringValue: ephemeral_volume - key: net.host.name value: stringValue: 172.18.0.2 @@ -29519,10 +29527,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: noexec_taint_node + stringValue: garbage_collector_attempt_to_delete - key: net.host.name value: stringValue: 172.18.0.2 @@ -29570,10 +29578,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: noexec_taint_pod + stringValue: garbage_collector_attempt_to_orphan - key: net.host.name value: stringValue: 172.18.0.2 @@ -29621,10 +29629,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: orphaned_pods_nodes + stringValue: garbage_collector_graph_changes - key: net.host.name 
value: stringValue: 172.18.0.2 @@ -29672,10 +29680,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: pvcprotection + stringValue: horizontalpodautoscaler - key: net.host.name value: stringValue: 172.18.0.2 @@ -29723,10 +29731,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: pvcs + stringValue: job - key: net.host.name value: stringValue: 172.18.0.2 @@ -29774,10 +29782,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: pvprotection + stringValue: job_orphan_pod - key: net.host.name value: stringValue: 172.18.0.2 @@ -29825,10 +29833,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: replicaset + stringValue: namespace - key: net.host.name value: stringValue: 172.18.0.2 @@ -29876,10 +29884,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: replicationmanager + stringValue: node - key: net.host.name value: stringValue: 172.18.0.2 @@ -29927,10 +29935,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 
514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: resource_quota_controller_resource_changes + stringValue: node_lifecycle_controller_pods - key: net.host.name value: stringValue: 172.18.0.2 @@ -29978,10 +29986,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: resourcequota_primary + stringValue: orphaned_pods_nodes - key: net.host.name value: stringValue: 172.18.0.2 @@ -30029,10 +30037,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: resourcequota_priority + stringValue: pvcprotection - key: net.host.name value: stringValue: 172.18.0.2 @@ -30080,10 +30088,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: root_ca_cert_publisher + stringValue: pvcs - key: net.host.name value: stringValue: 172.18.0.2 @@ -30131,10 +30139,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: service + stringValue: pvprotection - key: net.host.name value: stringValue: 172.18.0.2 @@ -30182,10 +30190,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: serviceaccount + stringValue: replicaset - key: net.host.name value: 
stringValue: 172.18.0.2 @@ -30233,10 +30241,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: serviceaccount_tokens_secret + stringValue: replicationmanager - key: net.host.name value: stringValue: 172.18.0.2 @@ -30284,10 +30292,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: serviceaccount_tokens_service + stringValue: resource_quota_controller_resource_changes - key: net.host.name value: stringValue: 172.18.0.2 @@ -30335,10 +30343,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: stale_pod_disruption + stringValue: resourcequota_primary - key: net.host.name value: stringValue: 172.18.0.2 @@ -30386,10 +30394,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: statefulset + stringValue: resourcequota_priority - key: net.host.name value: stringValue: 172.18.0.2 @@ -30437,10 +30445,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: token_cleaner + stringValue: root_ca_cert_publisher - key: net.host.name value: stringValue: 172.18.0.2 @@ -30488,10 +30496,10 @@ resourceMetrics: stringValue: 
kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ttl_jobs_to_delete + stringValue: service - key: net.host.name value: stringValue: 172.18.0.2 @@ -30539,10 +30547,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: ttlcontroller + stringValue: serviceaccount - key: net.host.name value: stringValue: 172.18.0.2 @@ -30590,10 +30598,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: validatingadmissionpolicy-status + stringValue: serviceaccount_tokens_secret - key: net.host.name value: stringValue: 172.18.0.2 @@ -30641,10 +30649,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: volume_expand + stringValue: serviceaccount_tokens_service - key: net.host.name value: stringValue: 172.18.0.2 @@ -30692,10 +30700,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 - key: name value: - stringValue: volumes + stringValue: stale_pod_disruption - key: net.host.name value: stringValue: 172.18.0.2 @@ -30721,11 +30729,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: workqueue_longest_running_processor_seconds - - name: node_collector_evictions_total - sum: - 
aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -30748,7 +30751,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: statefulset - key: net.host.name value: stringValue: 172.18.0.2 @@ -30774,12 +30780,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_godebug_non_default_behavior_zipinsecurepath_events_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 0 + - asDouble: 1 attributes: - key: host.name value: @@ -30801,7 +30802,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: token_cleaner - key: net.host.name value: stringValue: 172.18.0.2 @@ -30827,9 +30831,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -30852,7 +30853,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: ttl_jobs_to_delete - key: net.host.name value: stringValue: 172.18.0.2 @@ -30878,11 +30882,6 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: node_collector_unhealthy_nodes_in_zone - - name: taint_eviction_controller_pod_deletions_total - sum: - aggregationTemporality: 2 - dataPoints: - asDouble: 0 attributes: - key: host.name @@ -30905,7 +30904,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 
990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: ttlcontroller - key: net.host.name value: stringValue: 172.18.0.2 @@ -30931,12 +30933,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - name: go_cpu_classes_idle_cpu_seconds_total - sum: - aggregationTemporality: 2 - dataPoints: - - asDouble: 488.222313961 + - asDouble: 0 attributes: - key: host.name value: @@ -30958,7 +30955,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: validatingadmissionpolicy-status - key: net.host.name value: stringValue: 172.18.0.2 @@ -30984,10 +30984,7 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - isMonotonic: true - - gauge: - dataPoints: - - asDouble: 810272 + - asDouble: 0 attributes: - key: host.name value: @@ -31009,7 +31006,10 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 990c636f-88b0-4e3f-b97c-a46e1e3e22b7 + stringValue: 514ab4db-aa19-4655-8675-a99861a6a362 + - key: name + value: + stringValue: volume_expand - key: net.host.name value: stringValue: 172.18.0.2 @@ -31035,5 +31035,5 @@ resourceMetrics: value: stringValue: https timeUnixNano: "1000000" - name: go_gc_scan_globals_bytes + isMonotonic: true scope: {} diff --git a/functional_tests/testdata_histogram/expected/v1.30/controller_manager_metrics.yaml b/functional_tests/testdata_histogram/expected/v1.30/controller_manager_metrics.yaml index 241a43e27..fab92726c 100644 --- a/functional_tests/testdata_histogram/expected/v1.30/controller_manager_metrics.yaml +++ b/functional_tests/testdata_histogram/expected/v1.30/controller_manager_metrics.yaml @@ -21,7 +21,7 @@ resourceMetrics: stringValue: 
kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 8fa45e1f-80b6-40d3-a87d-5e7c6e9ff8e7 + stringValue: d842c515-08ca-4e80-bc2e-59c069ac609c - key: net.host.name value: stringValue: 172.18.0.2 @@ -49,23 +49,15 @@ resourceMetrics: schemaUrl: https://opentelemetry.io/schemas/1.6.1 scopeMetrics: - metrics: - - description: '[ALPHA] Number of namespace syncs happened in root ca cert publisher.' + - description: '[ALPHA] CEL compilation time in seconds.' histogram: aggregationTemporality: 2 dataPoints: - - attributes: - - key: code - value: - stringValue: "200" - bucketCounts: + - bucketCounts: - "0" - - "1" - - "2" - - "2" - "0" - "0" - "0" - - "1" - "0" - "0" - "0" @@ -74,112 +66,37 @@ resourceMetrics: - "0" - "0" - "0" - count: "6" - explicitBounds: - - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - - 16.384 - startTimeUnixNano: "1000000" - sum: 0.087509688 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: root_ca_cert_publisher_sync_duration_seconds - - description: Distribution of heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "15365" - - "261941" - - "168711" - - "101213" - - "64280" - - "20598" - - "5154" - - "2043" - - "2194" - - "513" - - "283" - - "188" - count: "642483" - explicitBounds: - - 8.999999999999998 - - 24.999999999999996 - - 64.99999999999999 - - 144.99999999999997 - - 320.99999999999994 - - 704.9999999999999 - - 1536.9999999999998 - - 3200.9999999999995 - - 6528.999999999999 - - 13568.999999999998 - - 27264.999999999996 - startTimeUnixNano: "1000000" - sum: 9.1403968e+07 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: go_gc_heap_allocs_by_size_bytes - - description: Distribution of freed heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "10882" - - "222030" - - "127604" - - "82554" - - "54068" - - "10349" - - "4436" - - "1693" - - "2014" - - "345" - - "82" - - "83" - count: "516140" explicitBounds: - - 8.999999999999998 - - 24.999999999999996 - - 64.99999999999999 - - 144.99999999999997 - - 320.99999999999994 - - 704.9999999999999 - - 1536.9999999999998 - - 3200.9999999999995 - - 6528.999999999999 - - 13568.999999999998 - - 27264.999999999996 + - 0.005 + - 0.01 + - 0.025 + - 0.05 + - 0.1 + - 0.25 + - 0.5 + - 1 + - 2.5 + - 5 + - 10 startTimeUnixNano: "1000000" - sum: 6.5807368e+07 + sum: 0 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: go_gc_heap_frees_by_size_bytes - - description: '[ALPHA] The ratio of chosen deleted pod''s ages to the current youngest pod''s age (at the time). Should be <2. The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate''s effect on the sorting (and deletion) of pods when a replicaset scales down. 
This only considers Ready pods when calculating and reporting.' + name: apiserver_cel_compilation_duration_seconds + - description: '[ALPHA] ' histogram: aggregationTemporality: 2 dataPoints: - - bucketCounts: + - attributes: + - key: status + value: + stringValue: miss + bucketCounts: + - "1" + - "0" - "0" - "0" - "0" @@ -187,27 +104,36 @@ resourceMetrics: - "0" - "0" - "0" + - "0" + - "0" + - "0" + count: "1" explicitBounds: + - 0.005 + - 0.01 + - 0.025 + - 0.05 + - 0.1 - 0.25 - 0.5 - 1 - - 2 - - 4 - - 8 + - 2.5 + - 5 + - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.005 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: replicaset_controller_sorting_deletion_age_ratio - - description: '[ALPHA] Duration of syncEndpoints() in seconds' + name: authentication_token_cache_request_duration_seconds + - description: '[ALPHA] Number of endpoints removed on each Service sync' histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - - "10" + - "16" - "0" - "0" - "0" @@ -223,23 +149,23 @@ resourceMetrics: - "0" - "0" - "0" - count: "10" + count: "16" explicitBounds: - - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - - 16.384 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 + - 32768 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -247,14 +173,12 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: endpoint_slice_mirroring_controller_endpoints_sync_duration - - description: '[ALPHA] Number of endpoints removed on each Service sync' + name: endpoint_slice_controller_endpoints_removed_per_sync + - description: '[STABLE] Time between when a cronjob is scheduled to be run, and when the corresponding job is created' histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - - "16" - - "0" - "0" - "0" - "0" @@ -266,11 +190,8 
@@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "0" - - "0" - count: "16" explicitBounds: + - 1 - 2 - 4 - 8 @@ -280,12 +201,6 @@ resourceMetrics: - 128 - 256 - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 - - 32768 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -293,29 +208,29 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: endpoint_slice_controller_endpoints_removed_per_sync - - description: '[ALPHA] ' + name: cronjob_controller_job_creation_skew_duration_seconds + - description: '[ALPHA] Number of EndpointSlices changed on each Service sync' histogram: aggregationTemporality: 2 dataPoints: - attributes: - - key: status + - key: topology value: - stringValue: miss + stringValue: Disabled bucketCounts: - - "1" - - "0" + - "7" - "0" - "0" - "0" - "0" - "0" - "0" + - "9" - "0" - "0" - "0" - "0" - count: "1" + count: "16" explicitBounds: - 0.005 - 0.01 @@ -329,25 +244,26 @@ resourceMetrics: - 5 - 10 startTimeUnixNano: "1000000" - sum: 0.004 + sum: 9 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: authentication_token_cache_request_duration_seconds - - description: '[ALPHA] Authorization duration in seconds broken out by result.' + name: endpoint_slice_controller_endpointslices_changed_per_sync + - description: '[ALPHA] Request size in bytes. Broken down by verb and host.' 
histogram: aggregationTemporality: 2 dataPoints: - attributes: - - key: result + - key: host value: - stringValue: allowed + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: GET bucketCounts: - - "16" - - "0" - - "0" + - "198" - "0" - "0" - "0" @@ -359,124 +275,43 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "0" - count: "16" + count: "198" explicitBounds: - - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 - - 16.384 + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 6.591399999999999e-05 + sum: 0 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: authorization_duration_seconds - - description: Deprecated. Prefer the identical /sched/pauses/total/gc:seconds. - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: + - attributes: + - key: host + value: + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PATCH + bucketCounts: + - "1" + - "2" + - "1" - "0" + - "8" + - "3" - "0" - - "5" - - "15" - - "6" - - "0" - - "0" - - "0" - count: "26" - explicitBounds: - - 6.399999999999999e-08 - - 6.399999999999999e-07 - - 7.167999999999999e-06 - - 8.191999999999999e-05 - - 0.0009175039999999999 - - 0.010485759999999998 - - 0.11744051199999998 - startTimeUnixNano: "1000000" - sum: 0.0006022400000000001 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: go_gc_pauses_seconds - - description: '[ALPHA] Response size in bytes. Broken down by verb and host.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb - value: - stringValue: GET - bucketCounts: - - "19" - - "77" - - "39" - - "4" - - "19" - - "25" - - "17" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "200" - explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 - startTimeUnixNano: "1000000" - sum: 608322 - timeUnixNano: "1000000" - - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb - value: - stringValue: PATCH - bucketCounts: - - "0" - - "0" - - "0" - - "1" - - "16" - "0" - "0" - "0" - "0" - "0" - - "0" - - "0" - count: "17" + count: "15" explicitBounds: - 64 - 256 @@ -490,7 +325,7 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 54864 + sum: 38104 timeUnixNano: "1000000" - attributes: - key: host @@ -500,19 +335,19 @@ resourceMetrics: value: stringValue: POST bucketCounts: - - "0" - - "40" - "1" - - "23" - - "41" - - "2" + - "57" + - "19" + - "7" + - "19" + - "1" - "0" - "0" - "0" - "0" - "0" - "0" - count: "107" + count: "104" explicitBounds: - 64 - 256 @@ -526,7 +361,7 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 118523 + sum: 50739 timeUnixNano: "1000000" - attributes: - key: host @@ -537,18 +372,18 @@ resourceMetrics: stringValue: PUT bucketCounts: - "0" - - "4" - - "80" - - "6" - - "35" - - "20" + - "0" + - "78" + - "13" + - "54" + - "1" - "0" - "0" - "0" - "0" - "0" - "0" - count: "145" + count: "146" explicitBounds: - 64 - 256 @@ -562,14 +397,14 @@ resourceMetrics: - 4.194304e+06 - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 257868 + sum: 175421 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: rest_client_response_size_bytes - - description: '[ALPHA] Distribution of the remaining lifetime on the certificate used to 
authenticate a request.' + name: rest_client_request_size_bytes + - description: '[ALPHA] Time for comparison of old to new for the purposes of CRDValidationRatcheting during an UPDATE in seconds.' histogram: aggregationTemporality: 2 dataPoints: @@ -585,25 +420,17 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "0" - - "0" - - "0" explicitBounds: - - 0 - - 1800 - - 3600 - - 7200 - - 21600 - - 43200 - - 86400 - - 172800 - - 345600 - - 604800 - - 2.592e+06 - - 7.776e+06 - - 1.5552e+07 - - 3.1104e+07 + - 1e-05 + - 4e-05 + - 0.00016 + - 0.00064 + - 0.00256 + - 0.01024 + - 0.04096 + - 0.16384 + - 0.65536 + - 2.62144 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -611,50 +438,45 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: apiserver_client_certificate_expiration_seconds - - description: '[ALPHA] Number of EndpointSlices changed on each Service sync' + name: apiextensions_apiserver_validation_ratcheting_seconds + - description: '[ALPHA] Histogram of the number of seconds the last auth exec plugin client certificate lived before being rotated. If auth exec plugin client certificates are unused, histogram will contain no data.' histogram: aggregationTemporality: 2 dataPoints: - - attributes: - - key: topology - value: - stringValue: Disabled - bucketCounts: - - "7" + - bucketCounts: + - "0" + - "0" - "0" - "0" - "0" - "0" - "0" - "0" - - "9" - "0" - "0" - "0" - "0" - count: "16" explicitBounds: - - 0.005 - - 0.01 - - 0.025 - - 0.05 - - 0.1 - - 0.25 - - 0.5 - - 1 - - 2.5 - - 5 - - 10 + - 600 + - 1800 + - 3600 + - 14400 + - 86400 + - 604800 + - 2.592e+06 + - 7.776e+06 + - 1.5552e+07 + - 3.1104e+07 + - 1.24416e+08 startTimeUnixNano: "1000000" - sum: 9 + sum: 0 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: endpoint_slice_controller_endpointslices_changed_per_sync - - description: '[ALPHA] How long in seconds an item stays in workqueue before being requested.' 
+ name: rest_client_exec_plugin_certificate_rotation_age + - description: '[ALPHA] How long in seconds processing an item from workqueue takes.' histogram: aggregationTemporality: 2 dataPoints: @@ -666,15 +488,15 @@ resourceMetrics: - "0" - "0" - "0" - - "23" - - "10" - - "16" - - "7" - - "3" - "0" - "0" + - "46" + - "13" + - "1" + - "3" + - "0" - "0" - count: "59" + count: "63" explicitBounds: - 1e-08 - 1e-07 @@ -687,7 +509,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.3306854940000001 + sum: 2.0266241129999996 timeUnixNano: "1000000" - attributes: - key: name @@ -698,8 +520,8 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - - "1" + - "2" + - "0" - "0" - "0" - "0" @@ -718,7 +540,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000134299 + sum: 4.287e-05 timeUnixNano: "1000000" - attributes: - key: name @@ -729,14 +551,14 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - - "3" + - "0" + - "6" - "0" - "0" - "0" - "0" - "0" - count: "4" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -749,7 +571,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000989034 + sum: 0.001040605 timeUnixNano: "1000000" - attributes: - key: name @@ -780,7 +602,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 6.3209e-05 + sum: 2.8283e-05 timeUnixNano: "1000000" - attributes: - key: name @@ -790,8 +612,8 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - - "2" + - "0" + - "3" - "0" - "0" - "0" @@ -811,7 +633,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 5.3865999999999996e-05 + sum: 5.0294e-05 timeUnixNano: "1000000" - attributes: - key: name @@ -822,10 +644,10 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - "1" - "0" - "0" + - "0" - "1" - "0" - "0" @@ -842,7 +664,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.20039477500000002 + sum: 0.6034830919999999 timeUnixNano: "1000000" - attributes: - key: name @@ -852,11 +674,11 @@ resourceMetrics: - "0" - "0" - "0" - - "17" + - "15" + 
- "11" - "2" - - "1" - "0" - - "10" + - "2" - "0" - "0" - "0" @@ -873,7 +695,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.39918466599999997 + sum: 0.03881558800000001 timeUnixNano: "1000000" - attributes: - key: name @@ -943,15 +765,15 @@ resourceMetrics: - "0" - "0" - "0" - - "9" - "0" - - "3" - - "3" - - "4" - "0" + - "8" + - "4" + - "2" + - "2" - "0" - "0" - count: "19" + count: "16" explicitBounds: - 1e-08 - 1e-07 @@ -964,7 +786,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.240433734 + sum: 1.8957623210000003 timeUnixNano: "1000000" - attributes: - key: name @@ -974,15 +796,15 @@ resourceMetrics: - "0" - "0" - "0" - - "19" - - "3" - - "6" - - "7" - - "6" - "0" - "0" + - "13" + - "16" + - "7" + - "2" + - "0" - "0" - count: "41" + count: "38" explicitBounds: - 1e-08 - 1e-07 @@ -995,7 +817,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.274977654 + sum: 1.5920111130000003 timeUnixNano: "1000000" - attributes: - key: name @@ -1065,11 +887,11 @@ resourceMetrics: - "0" - "0" - "0" - - "12" - - "0" - "0" + - "5" - "0" - - "2" + - "8" + - "1" - "0" - "0" - "0" @@ -1086,7 +908,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.079190493 + sum: 0.09241719300000001 timeUnixNano: "1000000" - attributes: - key: name @@ -1096,12 +918,12 @@ resourceMetrics: - "0" - "0" - "0" - - "15" - - "0" - "0" + - "3" + - "5" + - "8" - "0" - - "2" - - "0" + - "1" - "0" - "0" count: "17" @@ -1117,7 +939,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.08151744899999999 + sum: 0.8373582580000002 timeUnixNano: "1000000" - attributes: - key: name @@ -1127,10 +949,10 @@ resourceMetrics: - "0" - "0" - "0" - - "9" + - "0" + - "10" - "0" - "0" - - "1" - "0" - "0" - "0" @@ -1148,7 +970,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.008489321999999997 + sum: 0.000194512 timeUnixNano: "1000000" - attributes: - key: name @@ -1189,14 +1011,14 @@ resourceMetrics: - "0" - "0" - 
"0" + - "1" - "0" + - "5" - "0" - "0" - "0" - - "5" - - "0" - "0" - count: "5" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -1209,7 +1031,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 3.6698504740000004 + sum: 0.0368459 timeUnixNano: "1000000" - attributes: - key: name @@ -1248,16 +1070,16 @@ resourceMetrics: bucketCounts: - "0" - "0" - - "7" - - "473" - - "143" - - "36" - "0" + - "501" + - "165" + - "4" + - "1" - "0" - "0" - "0" - "0" - count: "659" + count: "671" explicitBounds: - 1e-08 - 1e-07 @@ -1270,7 +1092,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.012077585000000024 + sum: 0.008388135000000005 timeUnixNano: "1000000" - attributes: - key: name @@ -1430,15 +1252,15 @@ resourceMetrics: - "0" - "0" - "0" - - "6" - - "1" - - "0" + - "4" + - "2" - "0" - "1" - "0" - "0" - "0" - count: "8" + - "0" + count: "7" explicitBounds: - 1e-08 - 1e-07 @@ -1451,7 +1273,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.023829819999999995 + sum: 0.007379337999999999 timeUnixNano: "1000000" - attributes: - key: name @@ -1461,11 +1283,11 @@ resourceMetrics: - "0" - "0" - "0" - - "9" - - "0" - - "0" + - "3" + - "7" + - "2" + - "1" - "0" - - "4" - "0" - "0" - "0" @@ -1482,7 +1304,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.07911397599999999 + sum: 0.00205301 timeUnixNano: "1000000" - attributes: - key: name @@ -1492,15 +1314,15 @@ resourceMetrics: - "0" - "0" - "0" + - "0" - "1" - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - count: "2" + count: "1" explicitBounds: - 1e-08 - 1e-07 @@ -1513,7 +1335,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.039778326999999995 + sum: 2.4676e-05 timeUnixNano: "1000000" - attributes: - key: name @@ -1523,11 +1345,11 @@ resourceMetrics: - "0" - "0" - "0" - - "17" - - "1" + - "13" + - "7" + - "2" - "0" - "0" - - "4" - "0" - "0" - "0" @@ -1544,7 +1366,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.14240075600000002 + 
sum: 0.000611182 timeUnixNano: "1000000" - attributes: - key: name @@ -1674,15 +1496,15 @@ resourceMetrics: - "0" - "0" - "0" - - "27" - - "1" - - "2" - - "4" - - "5" - "0" + - "9" + - "14" + - "6" + - "9" + - "2" - "0" - "0" - count: "39" + count: "40" explicitBounds: - 1e-08 - 1e-07 @@ -1695,7 +1517,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.11725713200000003 + sum: 0.9238042379999996 timeUnixNano: "1000000" - attributes: - key: name @@ -1825,12 +1647,12 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - - "3" - - "2" + - "5" + - "0" + - "1" - "0" - "0" count: "6" @@ -1846,7 +1668,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.41562541199999997 + sum: 0.719315073 timeUnixNano: "1000000" - attributes: - key: name @@ -1886,12 +1708,12 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" - "0" - "0" + - "5" + - "0" - "1" - - "4" - "0" - "0" count: "6" @@ -1907,7 +1729,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 3.662269746 + sum: 0.563585007 timeUnixNano: "1000000" - attributes: - key: name @@ -1947,11 +1769,11 @@ resourceMetrics: - "0" - "0" - "0" - - "35" - - "0" + - "47" - "1" - "0" - - "12" + - "0" + - "0" - "0" - "0" - "0" @@ -1968,7 +1790,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.8110731150000003 + sum: 0.000290544 timeUnixNano: "1000000" - attributes: - key: name @@ -2039,13 +1861,13 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - "1" - "0" - "0" - "0" - "0" - "0" + - "0" count: "1" explicitBounds: - 1e-08 @@ -2059,7 +1881,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000259194 + sum: 1.4076e-05 timeUnixNano: "1000000" - attributes: - key: name @@ -2099,11 +1921,11 @@ resourceMetrics: - "0" - "0" - "0" - - "4" + - "6" + - "0" + - "0" - "0" - "0" - - "1" - - "1" - "1" - "0" - "0" @@ -2120,7 +1942,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.6953364989999999 + sum: 0.755161755 timeUnixNano: "1000000" - 
attributes: - key: name @@ -2216,29 +2038,74 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: workqueue_queue_duration_seconds - - description: '[ALPHA] Number of endpoints added on each Service sync' + name: workqueue_work_duration_seconds + - description: '[ALPHA] Duration in seconds for NodeController to update the health of a single node.' histogram: aggregationTemporality: 2 dataPoints: - - attributes: - - key: clusterCIDR - value: - stringValue: 10.244.0.0/16 - bucketCounts: + - bucketCounts: + - "27" - "1" - "0" - "0" - "0" - "0" - "0" - count: "1" + - "0" + - "0" + count: "28" explicitBounds: - - 1 - - 5 - - 25 - - 125 - - 625 + - 0.001 + - 0.004 + - 0.016 + - 0.064 + - 0.256 + - 1.024 + - 4.096 + - 16.384 + startTimeUnixNano: "1000000" + sum: 0.00284636 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: node_collector_update_node_health_duration_seconds + - description: '[ALPHA] Latencies in seconds of data encryption key(DEK) generation operations.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: + - 5e-06 + - 1e-05 + - 2e-05 + - 4e-05 + - 8e-05 + - 0.00016 + - 0.00032 + - 0.00064 + - 0.00128 + - 0.00256 + - 0.00512 + - 0.01024 + - 0.02048 + - 0.04096 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -2246,7 +2113,53 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: node_ipam_controller_cidrset_allocation_tries_per_request + name: apiserver_storage_data_key_generation_duration_seconds + - description: '[ALPHA] Duration of syncEndpoints() in seconds' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "10" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "10" + explicitBounds: + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + - 16.384 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: endpoint_slice_mirroring_controller_endpoints_sync_duration - description: '[ALPHA] Client side rate limiter latency in seconds. Broken down by verb, and host.' 
histogram: aggregationTemporality: 2 @@ -2259,11 +2172,11 @@ resourceMetrics: value: stringValue: GET bucketCounts: - - "134" + - "131" - "1" - - "44" - - "8" - - "9" + - "45" + - "7" + - "10" - "4" - "0" - "0" @@ -2272,7 +2185,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "200" + count: "198" explicitBounds: - 0.005 - 0.025 @@ -2287,7 +2200,7 @@ resourceMetrics: - 30 - 60 startTimeUnixNano: "1000000" - sum: 9.836870049 + sum: 9.771708125000005 timeUnixNano: "1000000" - attributes: - key: host @@ -2297,7 +2210,7 @@ resourceMetrics: value: stringValue: PATCH bucketCounts: - - "17" + - "15" - "0" - "0" - "0" @@ -2310,7 +2223,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "17" + count: "15" explicitBounds: - 0.005 - 0.025 @@ -2325,7 +2238,7 @@ resourceMetrics: - 30 - 60 startTimeUnixNano: "1000000" - sum: 2.2502000000000002e-05 + sum: 2.2839999999999995e-05 timeUnixNano: "1000000" - attributes: - key: host @@ -2335,9 +2248,9 @@ resourceMetrics: value: stringValue: POST bucketCounts: - - "75" + - "73" - "0" - - "23" + - "22" - "0" - "9" - "0" @@ -2348,7 +2261,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "107" + count: "104" explicitBounds: - 0.005 - 0.025 @@ -2363,7 +2276,7 @@ resourceMetrics: - 30 - 60 startTimeUnixNano: "1000000" - sum: 5.061467736999997 + sum: 5.1838831999999995 timeUnixNano: "1000000" - attributes: - key: host @@ -2373,7 +2286,7 @@ resourceMetrics: value: stringValue: PUT bucketCounts: - - "145" + - "146" - "0" - "0" - "0" @@ -2386,7 +2299,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "145" + count: "146" explicitBounds: - 0.005 - 0.025 @@ -2401,18 +2314,19 @@ resourceMetrics: - 30 - 60 startTimeUnixNano: "1000000" - sum: 0.00022481699999999993 + sum: 0.0004300169999999999 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram name: rest_client_rate_limiter_duration_seconds - - description: '[ALPHA] CEL compilation time in seconds.' 
+ - description: '[ALPHA] Number of endpoints added on each Service sync' histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: + - "16" - "0" - "0" - "0" @@ -2425,18 +2339,53 @@ resourceMetrics: - "0" - "0" - "0" + - "0" + - "0" + - "0" + count: "16" + explicitBounds: + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 + - 32768 + startTimeUnixNano: "1000000" + sum: 4 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: endpoint_slice_controller_endpoints_added_per_sync + - description: '[ALPHA] The ratio of chosen deleted pod''s ages to the current youngest pod''s age (at the time). Should be <2. The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate''s effect on the sorting (and deletion) of pods when a replicaset scales down. This only considers Ready pods when calculating and reporting.' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" explicitBounds: - - 0.005 - - 0.01 - - 0.025 - - 0.05 - - 0.1 - 0.25 - 0.5 - 1 - - 2.5 - - 5 - - 10 + - 2 + - 4 + - 8 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -2444,8 +2393,8 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: apiserver_cel_compilation_duration_seconds - - description: '[ALPHA] Request latency in seconds. Broken down by verb, and host.' + name: replicaset_controller_sorting_deletion_age_ratio + - description: '[ALPHA] Response size in bytes. Broken down by verb and host.' 
histogram: aggregationTemporality: 2 dataPoints: @@ -2457,35 +2406,33 @@ resourceMetrics: value: stringValue: GET bucketCounts: - - "96" - - "35" - - "44" - - "9" - - "10" + - "19" + - "78" + - "38" - "5" - - "1" - - "0" + - "16" + - "25" + - "17" - "0" - "0" - "0" - "0" - "0" - count: "200" + count: "198" explicitBounds: - - 0.005 - - 0.025 - - 0.1 - - 0.25 - - 0.5 - - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 12.578945927000007 + sum: 603705 timeUnixNano: "1000000" - attributes: - key: host @@ -2495,12 +2442,11 @@ resourceMetrics: value: stringValue: PATCH bucketCounts: - - "4" - - "9" - "0" - "0" - "0" - - "4" + - "1" + - "14" - "0" - "0" - "0" @@ -2508,22 +2454,21 @@ resourceMetrics: - "0" - "0" - "0" - count: "17" + count: "15" explicitBounds: - - 0.005 - - 0.025 - - 0.1 - - 0.25 - - 0.5 - - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 3.3372112920000006 + sum: 46591 timeUnixNano: "1000000" - attributes: - key: host @@ -2533,35 +2478,33 @@ resourceMetrics: value: stringValue: POST bucketCounts: - - "50" - - "15" - - "23" - - "1" - - "11" - - "7" - "0" + - "40" + - "1" + - "20" + - "41" + - "2" - "0" - "0" - "0" - "0" - "0" - "0" - count: "107" + count: "104" explicitBounds: - - 0.005 - - 0.025 - - 0.1 - - 0.25 - - 0.5 - - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 startTimeUnixNano: "1000000" - sum: 11.268858019000001 + sum: 115812 timeUnixNano: "1000000" - attributes: - key: host @@ -2571,11 +2514,48 @@ resourceMetrics: value: stringValue: PUT bucketCounts: - - "88" - - "54" - - "1" - - "1" - "0" + - "2" + - "84" + - "6" + - "34" 
+ - "20" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "146" + explicitBounds: + - 64 + - 256 + - 512 + - 1024 + - 4096 + - 16384 + - 65536 + - 262144 + - 1.048576e+06 + - 4.194304e+06 + - 1.6777216e+07 + startTimeUnixNano: "1000000" + sum: 259808 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: rest_client_response_size_bytes + - description: '[ALPHA] Request latency in seconds. Broken down by status code.' + histogram: + aggregationTemporality: 2 + dataPoints: + - attributes: + - key: code + value: + stringValue: "201" + bucketCounts: - "1" - "0" - "0" @@ -2584,29 +2564,26 @@ resourceMetrics: - "0" - "0" - "0" - count: "145" + - "0" + count: "1" explicitBounds: - - 0.005 - - 0.025 - - 0.1 - 0.25 - 0.5 + - 0.7 - 1 - - 2 - - 4 - - 8 - - 15 - - 30 - - 60 + - 1.5 + - 3 + - 5 + - 10 startTimeUnixNano: "1000000" - sum: 1.507132858 + sum: 0.001492718 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: rest_client_request_duration_seconds - - description: '[STABLE] Time between when a cronjob is scheduled to be run, and when the corresponding job is created' + name: apiserver_delegated_authz_request_duration_seconds + - description: '[ALPHA] A metric measuring the latency for updating each load balancer hosts.' histogram: aggregationTemporality: 2 dataPoints: @@ -2622,6 +2599,11 @@ resourceMetrics: - "0" - "0" - "0" + - "0" + - "0" + - "0" + - "0" + - "0" explicitBounds: - 1 - 2 @@ -2633,6 +2615,11 @@ resourceMetrics: - 128 - 256 - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -2640,7 +2627,39 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: cronjob_controller_job_creation_skew_duration_seconds + name: service_controller_update_loadbalancer_host_latency_seconds + - description: '[ALPHA] Duration in seconds for NodeController to update the health of all nodes.' 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "28" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "28" + explicitBounds: + - 0.01 + - 0.04 + - 0.16 + - 0.64 + - 2.56 + - 10.24 + - 40.96 + - 163.84 + startTimeUnixNano: "1000000" + sum: 0.004353011999999999 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: node_collector_update_all_nodes_health_duration_seconds - description: Distribution of individual non-GC-related stop-the-world stopping latencies. This is the time it takes from deciding to stop the world until all Ps are stopped. This is a subset of the total non-GC-related stop-the-world time (/sched/pauses/total/other:seconds). During this time, some threads may be executing. Bucket counts increase monotonically. histogram: aggregationTemporality: 2 @@ -2670,6 +2689,87 @@ resourceMetrics: value: stringValue: histogram name: go_sched_pauses_stopping_other_seconds + - description: Distribution of freed heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "10082" + - "191131" + - "104475" + - "72652" + - "41679" + - "7860" + - "3692" + - "1421" + - "1556" + - "248" + - "63" + - "75" + count: "434934" + explicitBounds: + - 8.999999999999998 + - 24.999999999999996 + - 64.99999999999999 + - 144.99999999999997 + - 320.99999999999994 + - 704.9999999999999 + - 1536.9999999999998 + - 3200.9999999999995 + - 6528.999999999999 + - 13568.999999999998 + - 27264.999999999996 + startTimeUnixNano: "1000000" + sum: 5.32536e+07 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: go_gc_heap_frees_by_size_bytes + - description: '[ALPHA] Distribution of the remaining lifetime on the certificate used to authenticate a request.' + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: + - 0 + - 1800 + - 3600 + - 7200 + - 21600 + - 43200 + - 86400 + - 172800 + - 345600 + - 604800 + - 2.592e+06 + - 7.776e+06 + - 1.5552e+07 + - 3.1104e+07 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: apiserver_client_certificate_expiration_seconds - description: '[ALPHA] The time it took to delete the job since it became eligible for deletion' histogram: aggregationTemporality: 2 @@ -2713,7 +2813,7 @@ resourceMetrics: value: stringValue: histogram name: ttl_after_finished_controller_job_deletion_duration_seconds - - description: '[ALPHA] CEL evaluation time in seconds.' + - description: '[ALPHA] A metric measuring the latency for nodesync which updates loadbalancer hosts on cluster node updates.' 
histogram: aggregationTemporality: 2 dataPoints: @@ -2730,36 +2830,44 @@ resourceMetrics: - "0" - "0" - "0" + - "0" + - "0" + - "0" + - "0" explicitBounds: - - 0.005 - - 0.01 - - 0.025 - - 0.05 - - 0.1 - - 0.25 - - 0.5 - 1 - - 2.5 - - 5 - - 10 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: apiserver_cel_evaluation_duration_seconds - - description: '[ALPHA] Request latency in seconds. Broken down by status code.' - histogram: - aggregationTemporality: 2 + - 2 + - 4 + - 8 + - 16 + - 32 + - 64 + - 128 + - 256 + - 512 + - 1024 + - 2048 + - 4096 + - 8192 + - 16384 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: service_controller_nodesync_latency_seconds + - description: '[ALPHA] Latency, in seconds, between the time when a taint effect has been activated for the Pod and its deletion via TaintEvictionController.' + histogram: + aggregationTemporality: 2 dataPoints: - - attributes: - - key: code - value: - stringValue: "201" - bucketCounts: - - "1" + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" - "0" - "0" - "0" @@ -2768,25 +2876,28 @@ resourceMetrics: - "0" - "0" - "0" - count: "1" explicitBounds: - - 0.25 + - 0.005 + - 0.025 + - 0.1 - 0.5 - - 0.7 - 1 - - 1.5 - - 3 - - 5 + - 2.5 - 10 + - 30 + - 60 + - 120 + - 180 + - 240 startTimeUnixNano: "1000000" - sum: 0.004823868 + sum: 0 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: apiserver_delegated_authn_request_duration_seconds - - description: '[ALPHA] Latencies in seconds of data encryption key(DEK) generation operations.' + name: taint_eviction_controller_pod_deletion_duration_seconds + - description: '[ALPHA] CEL evaluation time in seconds.' 
histogram: aggregationTemporality: 2 dataPoints: @@ -2803,24 +2914,18 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "0" - - "0" explicitBounds: - - 5e-06 - - 1e-05 - - 2e-05 - - 4e-05 - - 8e-05 - - 0.00016 - - 0.00032 - - 0.00064 - - 0.00128 - - 0.00256 - - 0.00512 - - 0.01024 - - 0.02048 - - 0.04096 + - 0.005 + - 0.01 + - 0.025 + - 0.05 + - 0.1 + - 0.25 + - 0.5 + - 1 + - 2.5 + - 5 + - 10 startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "1000000" @@ -2828,50 +2933,47 @@ resourceMetrics: - key: prometheus.type value: stringValue: histogram - name: apiserver_storage_data_key_generation_duration_seconds - - description: Distribution of individual GC-related stop-the-world pause latencies. This is the time from deciding to stop the world until the world is started again. Some of this time is spent getting all threads to stop (this is measured directly in /sched/pauses/stopping/gc:seconds), during which some threads may still be running. Bucket counts increase monotonically. + name: apiserver_cel_evaluation_duration_seconds + - description: '[ALPHA] Number of endpoints added on each Service sync' histogram: aggregationTemporality: 2 dataPoints: - - bucketCounts: + - attributes: + - key: clusterCIDR + value: + stringValue: 10.244.0.0/16 + bucketCounts: + - "1" - "0" - "0" - - "5" - - "15" - - "6" - "0" - "0" - "0" - count: "26" + count: "1" explicitBounds: - - 6.399999999999999e-08 - - 6.399999999999999e-07 - - 7.167999999999999e-06 - - 8.191999999999999e-05 - - 0.0009175039999999999 - - 0.010485759999999998 - - 0.11744051199999998 + - 1 + - 5 + - 25 + - 125 + - 625 startTimeUnixNano: "1000000" - sum: 0.0006022400000000001 + sum: 0 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: go_sched_pauses_total_gc_seconds - - description: '[ALPHA] A metric measuring the latency for updating each load balancer hosts.' 
+ name: node_ipam_controller_cidrset_allocation_tries_per_request + - description: '[ALPHA] Request latency in seconds. Broken down by status code.' histogram: aggregationTemporality: 2 dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" + - attributes: + - key: code + value: + stringValue: "201" + bucketCounts: + - "1" - "0" - "0" - "0" @@ -2880,83 +2982,76 @@ resourceMetrics: - "0" - "0" - "0" + count: "1" explicitBounds: + - 0.25 + - 0.5 + - 0.7 - 1 - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 + - 1.5 + - 3 + - 5 + - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.005402619 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: service_controller_update_loadbalancer_host_latency_seconds - - description: '[ALPHA] Latency, in seconds, between the time when a taint effect has been activated for the Pod and its deletion via TaintEvictionController.' + name: apiserver_delegated_authn_request_duration_seconds + - description: Distribution of heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" + - "14512" + - "262161" + - "166054" + - "102464" + - "54630" + - "20180" + - "5096" + - "1967" + - "1852" + - "456" + - "271" + - "186" + count: "629829" explicitBounds: - - 0.005 - - 0.025 - - 0.1 - - 0.5 - - 1 - - 2.5 - - 10 - - 30 - - 60 - - 120 - - 180 - - 240 + - 8.999999999999998 + - 24.999999999999996 + - 64.99999999999999 + - 144.99999999999997 + - 320.99999999999994 + - 704.9999999999999 + - 1536.9999999999998 + - 3200.9999999999995 + - 6528.999999999999 + - 13568.999999999998 + - 27264.999999999996 startTimeUnixNano: "1000000" - sum: 0 + sum: 8.6040568e+07 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: taint_eviction_controller_pod_deletion_duration_seconds + name: go_gc_heap_allocs_by_size_bytes - description: Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. Bucket counts increase monotonically. 
histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - - "1173" - - "637" - - "2875" - - "1221" - - "223" - - "14" + - "1087" + - "679" + - "2634" + - "1561" + - "210" + - "44" - "0" - "0" - count: "6143" + count: "6215" explicitBounds: - 6.399999999999999e-08 - 6.399999999999999e-07 @@ -2966,7 +3061,7 @@ resourceMetrics: - 0.010485759999999998 - 0.11744051199999998 startTimeUnixNano: "1000000" - sum: 0.041746112 + sum: 0.07049184 timeUnixNano: "1000000" metadata: - key: prometheus.type @@ -2982,7 +3077,7 @@ resourceMetrics: value: stringValue: success bucketCounts: - - "16" + - "15" - "0" - "0" - "0" @@ -2998,7 +3093,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "16" + count: "15" explicitBounds: - 0.001 - 0.002 @@ -3016,401 +3111,59 @@ resourceMetrics: - 8.192 - 16.384 startTimeUnixNano: "1000000" - sum: 0.000331944 + sum: 0.000385428 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram name: authentication_duration_seconds - - description: '[ALPHA] Request size in bytes. Broken down by verb and host.' + - description: '[ALPHA] How long in seconds an item stays in workqueue before being requested.' 
histogram: aggregationTemporality: 2 dataPoints: - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: GET + stringValue: ClusterRoleAggregator bucketCounts: - - "200" - - "0" - - "0" - - "0" - - "0" - - "0" - "0" - "0" - "0" + - "21" + - "6" + - "25" + - "8" + - "3" - "0" - "0" - "0" - count: "200" + count: "63" explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 startTimeUnixNano: "1000000" - sum: 0 + sum: 0.17281754 timeUnixNano: "1000000" - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb + - key: name value: - stringValue: PATCH + stringValue: DynamicCABundle-client-ca-bundle bucketCounts: + - "0" + - "0" + - "0" - "1" - - "5" - "1" - "0" - - "7" - - "3" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "17" - explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 - startTimeUnixNano: "1000000" - sum: 34721 - timeUnixNano: "1000000" - - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb - value: - stringValue: POST - bucketCounts: - - "1" - - "57" - - "22" - - "7" - - "19" - - "1" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "107" - explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 - startTimeUnixNano: "1000000" - sum: 52198 - timeUnixNano: "1000000" - - attributes: - - key: host - value: - stringValue: 172.18.0.2:6443 - - key: verb - value: - stringValue: PUT - bucketCounts: - - "0" - - "0" - - "76" - - "13" - - "55" - - "1" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "145" - explicitBounds: - - 64 - - 256 - - 512 - - 1024 - - 
4096 - - 16384 - - 65536 - - 262144 - - 1.048576e+06 - - 4.194304e+06 - - 1.6777216e+07 - startTimeUnixNano: "1000000" - sum: 173226 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: rest_client_request_size_bytes - - description: Distribution of individual non-GC-related stop-the-world pause latencies. This is the time from deciding to stop the world until the world is started again. Some of this time is spent getting all threads to stop (measured directly in /sched/pauses/stopping/other:seconds). Bucket counts increase monotonically. - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - explicitBounds: - - 6.399999999999999e-08 - - 6.399999999999999e-07 - - 7.167999999999999e-06 - - 8.191999999999999e-05 - - 0.0009175039999999999 - - 0.010485759999999998 - - 0.11744051199999998 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: go_sched_pauses_total_other_seconds - - description: '[ALPHA] Number of endpoints added on each Service sync' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "16" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "16" - explicitBounds: - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 - - 32768 - startTimeUnixNano: "1000000" - sum: 4 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: endpoint_slice_controller_endpoints_added_per_sync - - description: '[ALPHA] A metric measuring the latency for nodesync which updates loadbalancer hosts on cluster node updates.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - explicitBounds: - - 1 - - 2 - - 4 - - 8 - - 16 - - 32 - - 64 - - 128 - - 256 - - 512 - - 1024 - - 2048 - - 4096 - - 8192 - - 16384 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: service_controller_nodesync_latency_seconds - - description: '[ALPHA] Duration in seconds for NodeController to update the health of all nodes.' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "27" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - count: "27" - explicitBounds: - - 0.01 - - 0.04 - - 0.16 - - 0.64 - - 2.56 - - 10.24 - - 40.96 - - 163.84 - startTimeUnixNano: "1000000" - sum: 0.017431722999999993 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: node_collector_update_all_nodes_health_duration_seconds - - description: '[ALPHA] Histogram of the number of seconds the last auth exec plugin client certificate lived before being rotated. If auth exec plugin client certificates are unused, histogram will contain no data.' - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - - "0" - explicitBounds: - - 600 - - 1800 - - 3600 - - 14400 - - 86400 - - 604800 - - 2.592e+06 - - 7.776e+06 - - 1.5552e+07 - - 3.1104e+07 - - 1.24416e+08 - startTimeUnixNano: "1000000" - sum: 0 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: rest_client_exec_plugin_certificate_rotation_age - - description: '[ALPHA] How long in seconds processing an item from workqueue takes.' 
- histogram: - aggregationTemporality: 2 - dataPoints: - - attributes: - - key: name - value: - stringValue: ClusterRoleAggregator - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "0" - - "43" - - "13" - - "0" - - "3" - - "0" - - "0" - count: "59" - explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 - - 0.1 - - 1 - - 10 - startTimeUnixNano: "1000000" - sum: 2.639294359000001 - timeUnixNano: "1000000" - - attributes: - - key: name - value: - stringValue: DynamicCABundle-client-ca-bundle - bucketCounts: - - "0" - - "0" - - "0" - - "0" - - "2" - - "0" - "0" - "0" - "0" @@ -3429,7 +3182,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 7.7095e-05 + sum: 1.8701000000000003e-05 timeUnixNano: "1000000" - attributes: - key: name @@ -3440,14 +3193,14 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - "4" + - "2" - "0" - "0" - "0" - "0" - "0" - count: "4" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -3460,7 +3213,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000744315 + sum: 0.000537703 timeUnixNano: "1000000" - attributes: - key: name @@ -3471,8 +3224,8 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" + - "1" - "0" - "0" - "0" @@ -3491,7 +3244,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 2.8934e-05 + sum: 0.000269112 timeUnixNano: "1000000" - attributes: - key: name @@ -3522,7 +3275,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 5.5123e-05 + sum: 4.3540999999999995e-05 timeUnixNano: "1000000" - attributes: - key: name @@ -3537,8 +3290,8 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" + - "1" - "0" count: "2" explicitBounds: @@ -3553,7 +3306,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.505574071 + sum: 1.8004588220000002 timeUnixNano: "1000000" - attributes: - key: name @@ -3562,13 +3315,13 @@ resourceMetrics: bucketCounts: - "0" - "0" - - "0" - - "19" - - "7" - - "2" - - "1" + - "0" + - "16" + - 
"3" - "1" - "0" + - "10" + - "0" - "0" - "0" count: "30" @@ -3584,7 +3337,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.020845419 + sum: 0.43301819700000005 timeUnixNano: "1000000" - attributes: - key: name @@ -3654,15 +3407,15 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "0" - - "11" + - "6" - "2" - - "4" - "2" + - "1" + - "5" - "0" - "0" - count: "19" + - "0" + count: "16" explicitBounds: - 1e-08 - 1e-07 @@ -3675,7 +3428,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.8336191409999996 + sum: 0.24582215799999999 timeUnixNano: "1000000" - attributes: - key: name @@ -3685,15 +3438,15 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "0" - - "17" - - "15" - - "7" + - "16" - "2" + - "7" + - "4" + - "9" + - "0" - "0" - "0" - count: "41" + count: "38" explicitBounds: - 1e-08 - 1e-07 @@ -3706,7 +3459,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 1.6506292250000003 + sum: 0.277535491 timeUnixNano: "1000000" - attributes: - key: name @@ -3776,12 +3529,12 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - - "4" + - "12" - "0" - - "8" - "0" - - "1" + - "0" + - "2" + - "0" - "0" - "0" count: "14" @@ -3797,7 +3550,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.6358130330000001 + sum: 0.05124650099999999 timeUnixNano: "1000000" - attributes: - key: name @@ -3807,12 +3560,12 @@ resourceMetrics: - "0" - "0" - "0" + - "14" - "1" - - "3" - - "4" - - "8" - "0" - - "1" + - "0" + - "2" + - "0" - "0" - "0" count: "17" @@ -3828,7 +3581,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.6888996699999999 + sum: 0.169764549 timeUnixNano: "1000000" - attributes: - key: name @@ -3838,11 +3591,11 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "10" + - "9" - "0" - "0" - "0" + - "1" - "0" - "0" - "0" @@ -3859,7 +3612,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.00021828100000000002 + sum: 0.08571319100000001 timeUnixNano: "1000000" - attributes: - key: name @@ 
-3902,12 +3655,12 @@ resourceMetrics: - "0" - "0" - "0" - - "5" - "0" - "0" + - "6" - "0" - "0" - count: "5" + count: "6" explicitBounds: - 1e-08 - 1e-07 @@ -3920,7 +3673,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.025283864 + sum: 4.686947429 timeUnixNano: "1000000" - attributes: - key: name @@ -3959,16 +3712,16 @@ resourceMetrics: bucketCounts: - "0" - "0" - - "1" - - "537" - - "119" - - "2" + - "9" + - "460" + - "104" + - "98" - "0" - "0" - "0" - "0" - "0" - count: "659" + count: "671" explicitBounds: - 1e-08 - 1e-07 @@ -3981,7 +3734,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.004994353999999997 + sum: 0.024226090999999977 timeUnixNano: "1000000" - attributes: - key: name @@ -4142,14 +3895,14 @@ resourceMetrics: - "0" - "0" - "6" - - "1" - "0" - - "1" - "0" - "0" + - "1" + - "0" - "0" - "0" - count: "8" + count: "7" explicitBounds: - 1e-08 - 1e-07 @@ -4162,7 +3915,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.005602995999999999 + sum: 0.092592275 timeUnixNano: "1000000" - attributes: - key: name @@ -4172,11 +3925,11 @@ resourceMetrics: - "0" - "0" - "0" - - "4" - "8" - "1" - "0" - "0" + - "4" - "0" - "0" - "0" @@ -4193,7 +3946,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.00069763 + sum: 0.364914616 timeUnixNano: "1000000" - attributes: - key: name @@ -4204,14 +3957,14 @@ resourceMetrics: - "0" - "0" - "0" - - "2" - "0" - "0" - "0" + - "1" - "0" - "0" - "0" - count: "2" + count: "1" explicitBounds: - 1e-08 - 1e-07 @@ -4224,7 +3977,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 8.9167e-05 + sum: 0.091185582 timeUnixNano: "1000000" - attributes: - key: name @@ -4234,11 +3987,11 @@ resourceMetrics: - "0" - "0" - "0" - - "14" - - "8" - - "0" - - "0" + - "12" + - "5" + - "1" - "0" + - "4" - "0" - "0" - "0" @@ -4255,7 +4008,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000344169 + sum: 0.3601596169999999 timeUnixNano: "1000000" - 
attributes: - key: name @@ -4385,15 +4138,15 @@ resourceMetrics: - "0" - "0" - "0" + - "24" + - "1" + - "5" + - "3" + - "7" - "0" - - "9" - - "13" - - "9" - - "6" - - "2" - "0" - "0" - count: "39" + count: "40" explicitBounds: - 1e-08 - 1e-07 @@ -4406,7 +4159,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.8035203130000002 + sum: 0.12755579099999997 timeUnixNano: "1000000" - attributes: - key: name @@ -4536,12 +4289,12 @@ resourceMetrics: - "0" - "0" - "0" + - "1" - "0" - "0" - "0" - - "5" - "1" - - "0" + - "4" - "0" - "0" count: "6" @@ -4557,7 +4310,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.08754944399999999 + sum: 3.258264172 timeUnixNano: "1000000" - attributes: - key: name @@ -4597,12 +4350,12 @@ resourceMetrics: - "0" - "0" - "0" + - "1" - "0" - "0" - "0" - - "5" - - "0" - "1" + - "4" - "0" - "0" count: "6" @@ -4618,7 +4371,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.8116132939999999 + sum: 2.393591978 timeUnixNano: "1000000" - attributes: - key: name @@ -4658,11 +4411,11 @@ resourceMetrics: - "0" - "0" - "0" - - "47" + - "37" - "1" - "0" - "0" - - "0" + - "10" - "0" - "0" - "0" @@ -4679,7 +4432,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.00027870099999999996 + sum: 0.7501404709999999 timeUnixNano: "1000000" - attributes: - key: name @@ -4750,8 +4503,8 @@ resourceMetrics: - "0" - "0" - "0" - - "1" - "0" + - "1" - "0" - "0" - "0" @@ -4770,7 +4523,7 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 4.7589e-05 + sum: 0.000249947 timeUnixNano: "1000000" - attributes: - key: name @@ -4810,37 +4563,209 @@ resourceMetrics: - "0" - "0" - "0" - - "5" + - "5" + - "0" + - "1" + - "0" + - "1" + - "0" + - "0" + - "0" + count: "7" + explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 + startTimeUnixNano: "1000000" + sum: 0.08389994599999999 + timeUnixNano: "1000000" + - 
attributes: + - key: name + value: + stringValue: validatingadmissionpolicy-status + bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + - attributes: + - key: name + value: + stringValue: volume_expand + bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + - attributes: + - key: name + value: + stringValue: volumes + bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: + - 1e-08 + - 1e-07 + - 1e-06 + - 9.999999999999999e-06 + - 9.999999999999999e-05 + - 0.001 + - 0.01 + - 0.1 + - 1 + - 10 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: workqueue_queue_duration_seconds + - description: Distribution of individual non-GC-related stop-the-world pause latencies. This is the time from deciding to stop the world until the world is started again. Some of this time is spent getting all threads to stop (measured directly in /sched/pauses/stopping/other:seconds). Bucket counts increase monotonically. 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + explicitBounds: + - 6.399999999999999e-08 + - 6.399999999999999e-07 + - 7.167999999999999e-06 + - 8.191999999999999e-05 + - 0.0009175039999999999 + - 0.010485759999999998 + - 0.11744051199999998 + startTimeUnixNano: "1000000" + sum: 0 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: go_sched_pauses_total_other_seconds + - description: '[ALPHA] Request latency in seconds. Broken down by verb, and host.' + histogram: + aggregationTemporality: 2 + dataPoints: + - attributes: + - key: host + value: + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: GET + bucketCounts: + - "89" + - "39" + - "44" + - "9" + - "11" + - "5" + - "1" + - "0" - "0" - "0" - - "1" - "0" - - "1" - "0" - "0" - count: "7" + count: "198" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0.7021705919999999 + sum: 13.2880485 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: validatingadmissionpolicy-status + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PATCH bucketCounts: + - "5" + - "6" - "0" - "0" - "0" + - "4" - "0" - "0" - "0" @@ -4848,26 +4773,37 @@ resourceMetrics: - "0" - "0" - "0" - - "0" + count: "15" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 2.754314302 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: volume_expand + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: 
POST bucketCounts: + - "44" + - "19" + - "21" + - "2" + - "11" + - "7" - "0" - "0" - "0" @@ -4875,31 +4811,37 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "0" - - "0" - - "0" + count: "104" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 11.538072622 timeUnixNano: "1000000" - attributes: - - key: name + - key: host value: - stringValue: volumes + stringValue: 172.18.0.2:6443 + - key: verb + value: + stringValue: PUT bucketCounts: + - "76" + - "67" + - "2" - "0" - "0" + - "1" - "0" - "0" - "0" @@ -4907,64 +4849,98 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - - "0" + count: "146" explicitBounds: - - 1e-08 - - 1e-07 - - 1e-06 - - 9.999999999999999e-06 - - 9.999999999999999e-05 - - 0.001 - - 0.01 + - 0.005 + - 0.025 - 0.1 + - 0.25 + - 0.5 - 1 - - 10 + - 2 + - 4 + - 8 + - 15 + - 30 + - 60 startTimeUnixNano: "1000000" - sum: 0 + sum: 1.5314315709999997 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: workqueue_work_duration_seconds - - description: '[ALPHA] Duration in seconds for NodeController to update the health of a single node.' + name: rest_client_request_duration_seconds + - description: Distribution of individual GC-related stop-the-world stopping latencies. This is the time it takes from deciding to stop the world until all Ps are stopped. This is a subset of the total GC-related stop-the-world time (/sched/pauses/total/gc:seconds). During this time, some threads may be executing. Bucket counts increase monotonically. 
histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - - "26" - "0" + - "10" - "1" + - "10" + - "3" + - "0" - "0" - "0" + count: "24" + explicitBounds: + - 6.399999999999999e-08 + - 6.399999999999999e-07 + - 7.167999999999999e-06 + - 8.191999999999999e-05 + - 0.0009175039999999999 + - 0.010485759999999998 + - 0.11744051199999998 + startTimeUnixNano: "1000000" + sum: 0.00031872000000000004 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: go_sched_pauses_stopping_gc_seconds + - description: Distribution of individual GC-related stop-the-world pause latencies. This is the time from deciding to stop the world until the world is started again. Some of this time is spent getting all threads to stop (this is measured directly in /sched/pauses/stopping/gc:seconds), during which some threads may still be running. Bucket counts increase monotonically. + histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: + - "0" - "0" + - "4" + - "15" + - "5" - "0" - "0" - "0" - count: "27" + count: "24" explicitBounds: - - 0.001 - - 0.004 - - 0.016 - - 0.064 - - 0.256 - - 1.024 - - 4.096 - - 16.384 + - 6.399999999999999e-08 + - 6.399999999999999e-07 + - 7.167999999999999e-06 + - 8.191999999999999e-05 + - 0.0009175039999999999 + - 0.010485759999999998 + - 0.11744051199999998 startTimeUnixNano: "1000000" - sum: 0.009113469 + sum: 0.00051968 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: node_collector_update_node_health_duration_seconds - - description: '[ALPHA] Time for comparison of old to new for the purposes of CRDValidationRatcheting during an UPDATE in seconds.' + name: go_sched_pauses_total_gc_seconds + - description: '[ALPHA] Authorization duration in seconds broken out by result.' 
histogram: aggregationTemporality: 2 dataPoints: - - bucketCounts: + - attributes: + - key: result + value: + stringValue: allowed + bucketCounts: + - "15" - "0" - "0" - "0" @@ -4976,39 +4952,49 @@ resourceMetrics: - "0" - "0" - "0" + - "0" + - "0" + - "0" + - "0" + count: "15" explicitBounds: - - 1e-05 - - 4e-05 - - 0.00016 - - 0.00064 - - 0.00256 - - 0.01024 - - 0.04096 - - 0.16384 - - 0.65536 - - 2.62144 + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + - 16.384 startTimeUnixNano: "1000000" - sum: 0 + sum: 5.9977e-05 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: apiextensions_apiserver_validation_ratcheting_seconds - - description: Distribution of individual GC-related stop-the-world stopping latencies. This is the time it takes from deciding to stop the world until all Ps are stopped. This is a subset of the total GC-related stop-the-world time (/sched/pauses/total/gc:seconds). During this time, some threads may be executing. Bucket counts increase monotonically. + name: authorization_duration_seconds + - description: Deprecated. Prefer the identical /sched/pauses/total/gc:seconds. histogram: aggregationTemporality: 2 dataPoints: - bucketCounts: - "0" - - "11" - - "1" - - "10" + - "0" - "4" + - "15" + - "5" - "0" - "0" - "0" - count: "26" + count: "24" explicitBounds: - 6.399999999999999e-08 - 6.399999999999999e-07 @@ -5018,22 +5004,24 @@ resourceMetrics: - 0.010485759999999998 - 0.11744051199999998 startTimeUnixNano: "1000000" - sum: 0.000400704 + sum: 0.00051968 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: go_sched_pauses_stopping_gc_seconds - - description: '[ALPHA] Request latency in seconds. Broken down by status code.' + name: go_gc_pauses_seconds + - description: '[ALPHA] Number of namespace syncs happened in root ca cert publisher.' 
histogram: aggregationTemporality: 2 dataPoints: - attributes: - key: code value: - stringValue: "201" + stringValue: "200" bucketCounts: + - "0" + - "4" - "1" - "0" - "0" @@ -5042,25 +5030,37 @@ resourceMetrics: - "0" - "0" - "0" + - "1" - "0" - count: "1" + - "0" + - "0" + - "0" + - "0" + count: "6" explicitBounds: - - 0.25 - - 0.5 - - 0.7 - - 1 - - 1.5 - - 3 - - 5 - - 10 + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + - 16.384 startTimeUnixNano: "1000000" - sum: 0.001369719 + sum: 0.719279898 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: apiserver_delegated_authz_request_duration_seconds + name: root_ca_cert_publisher_sync_duration_seconds scope: - name: otelcol/prometheusreceiver - version: v0.105.0 + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver + version: v0.110.0 diff --git a/functional_tests/testdata_histogram/expected/v1.30/coredns_metrics.yaml b/functional_tests/testdata_histogram/expected/v1.30/coredns_metrics.yaml index ac323f921..0ddbe661c 100644 --- a/functional_tests/testdata_histogram/expected/v1.30/coredns_metrics.yaml +++ b/functional_tests/testdata_histogram/expected/v1.30/coredns_metrics.yaml @@ -18,10 +18,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-7db6d8ff4d-wht27 + stringValue: coredns-7db6d8ff4d-2ztxm - key: k8s.pod.uid value: - stringValue: 9b0a29ed-734c-42e3-892d-ca2f2dc2553e + stringValue: 14dcef47-a7c0-428a-99ff-2b3504322c2a - key: net.host.name value: stringValue: 10.244.0.3 @@ -49,7 +49,7 @@ resourceMetrics: schemaUrl: https://opentelemetry.io/schemas/1.6.1 scopeMetrics: - metrics: - - description: Size of the EDNS0 UDP buffer in bytes (64K for TCP) per zone and protocol. + - description: Size of the returned response in bytes. 
histogram: aggregationTemporality: 2 dataPoints: @@ -64,10 +64,10 @@ resourceMetrics: value: stringValue: . bucketCounts: - - "0" - - "2" - "0" - "0" + - "14" + - "3" - "0" - "0" - "0" @@ -79,7 +79,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "2" + count: "17" explicitBounds: - 0 - 100 @@ -96,88 +96,66 @@ resourceMetrics: - 48000 - 64000 startTimeUnixNano: "1000000" - sum: 125 + sum: 2723 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: coredns_dns_request_size_bytes - - description: Histogram of the time (in seconds) each request took. - histogram: - aggregationTemporality: 2 - dataPoints: - - bucketCounts: - - "2" - - "111" - - "0" - - "0" - - "0" - - "0" - count: "113" - explicitBounds: - - 0.00025 - - 0.0025 - - 0.025 - - 0.25 - - 2.5 - startTimeUnixNano: "1000000" - sum: 0.04157324600000001 - timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: coredns_health_request_duration_seconds - - description: Client side rate limiter latency in seconds. Broken down by verb and host. + name: coredns_dns_response_size_bytes + - description: Histogram of the time each request took. 
histogram: aggregationTemporality: 2 dataPoints: - attributes: - - key: host + - key: proxy_name value: - stringValue: 10.96.0.1:443 - - key: verb + stringValue: forward + - key: rcode value: - stringValue: GET + stringValue: NOERROR + - key: to + value: + stringValue: 172.18.0.1:53 bucketCounts: - - "3" - "0" - "0" - "0" - "0" - "0" + - "1" + - "1" - "0" - "0" - "0" - "0" - "0" - "0" - count: "3" + - "0" + - "0" + - "0" + - "0" + count: "2" explicitBounds: - - 0.005 - - 0.01 - - 0.025 - - 0.05 - - 0.1 - - 0.25 - - 0.5 - - 1 - - 2.5 - - 5 - - 10 + - 0.00025 + - 0.0005 + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 startTimeUnixNano: "1000000" - sum: 3.225e-06 + sum: 0.015369563999999999 timeUnixNano: "1000000" - metadata: - - key: prometheus.type - value: - stringValue: histogram - name: coredns_kubernetes_rest_client_rate_limiter_duration_seconds - - description: Histogram of the time each request took. - histogram: - aggregationTemporality: 2 - dataPoints: - attributes: - key: proxy_name value: @@ -194,9 +172,10 @@ resourceMetrics: - "0" - "0" - "0" - - "0" + - "2" - "1" - "0" + - "1" - "0" - "0" - "0" @@ -205,8 +184,7 @@ resourceMetrics: - "0" - "0" - "0" - - "0" - count: "1" + count: "4" explicitBounds: - 0.00025 - 0.0005 @@ -225,14 +203,14 @@ resourceMetrics: - 4.096 - 8.192 startTimeUnixNano: "1000000" - sum: 0.009530533 + sum: 0.057460335 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram name: coredns_proxy_request_duration_seconds - - description: Request latency in seconds. Broken down by verb and host. + - description: Client side rate limiter latency in seconds. Broken down by verb and host. 
histogram: aggregationTemporality: 2 dataPoints: @@ -244,7 +222,6 @@ resourceMetrics: value: stringValue: GET bucketCounts: - - "0" - "3" - "0" - "0" @@ -256,6 +233,7 @@ resourceMetrics: - "0" - "0" - "0" + - "0" count: "3" explicitBounds: - 0.005 @@ -270,69 +248,85 @@ resourceMetrics: - 5 - 10 startTimeUnixNano: "1000000" - sum: 0.021691217000000002 + sum: 3.3040000000000005e-06 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: coredns_kubernetes_rest_client_request_duration_seconds - - description: Histogram of the time (in seconds) each request took per zone. + name: coredns_kubernetes_rest_client_rate_limiter_duration_seconds + - description: Request latency in seconds. Broken down by verb and host. histogram: aggregationTemporality: 2 dataPoints: - attributes: - - key: server + - key: host value: - stringValue: dns://:53 - - key: zone + stringValue: 10.96.0.1:443 + - key: verb value: - stringValue: . + stringValue: GET bucketCounts: - - "1" + - "0" + - "3" - "0" - "0" - "0" - "0" - "0" - - "1" - "0" - "0" - "0" - "0" - "0" + count: "3" + explicitBounds: + - 0.005 + - 0.01 + - 0.025 + - 0.05 + - 0.1 + - 0.25 + - 0.5 + - 1 + - 2.5 + - 5 + - 10 + startTimeUnixNano: "1000000" + sum: 0.015708676999999997 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: coredns_kubernetes_rest_client_request_duration_seconds + - description: Histogram of the time (in seconds) each request took. 
+ histogram: + aggregationTemporality: 2 + dataPoints: + - bucketCounts: - "0" + - "159" - "0" - "0" - "0" - "0" - count: "2" + count: "159" explicitBounds: - 0.00025 - - 0.0005 - - 0.001 - - 0.002 - - 0.004 - - 0.008 - - 0.016 - - 0.032 - - 0.064 - - 0.128 - - 0.256 - - 0.512 - - 1.024 - - 2.048 - - 4.096 - - 8.192 + - 0.0025 + - 0.025 + - 0.25 + - 2.5 startTimeUnixNano: "1000000" - sum: 0.009800634999999999 + sum: 0.07593674800000003 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: coredns_dns_request_duration_seconds - - description: Size of the returned response in bytes. + name: coredns_health_request_duration_seconds + - description: Size of the EDNS0 UDP buffer in bytes (64K for TCP) per zone and protocol. histogram: aggregationTemporality: 2 dataPoints: @@ -348,8 +342,8 @@ resourceMetrics: stringValue: . bucketCounts: - "0" - - "0" - - "2" + - "14" + - "3" - "0" - "0" - "0" @@ -362,7 +356,7 @@ resourceMetrics: - "0" - "0" - "0" - count: "2" + count: "17" explicitBounds: - 0 - 100 @@ -379,13 +373,68 @@ resourceMetrics: - 48000 - 64000 startTimeUnixNano: "1000000" - sum: 282 + sum: 1279 timeUnixNano: "1000000" metadata: - key: prometheus.type value: stringValue: histogram - name: coredns_dns_response_size_bytes + name: coredns_dns_request_size_bytes + - description: Histogram of the time (in seconds) each request took per zone. + histogram: + aggregationTemporality: 2 + dataPoints: + - attributes: + - key: server + value: + stringValue: dns://:53 + - key: zone + value: + stringValue: . 
+ bucketCounts: + - "10" + - "1" + - "0" + - "0" + - "0" + - "3" + - "2" + - "0" + - "1" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + - "0" + count: "17" + explicitBounds: + - 0.00025 + - 0.0005 + - 0.001 + - 0.002 + - 0.004 + - 0.008 + - 0.016 + - 0.032 + - 0.064 + - 0.128 + - 0.256 + - 0.512 + - 1.024 + - 2.048 + - 4.096 + - 8.192 + startTimeUnixNano: "1000000" + sum: 0.07464508700000001 + timeUnixNano: "1000000" + metadata: + - key: prometheus.type + value: + stringValue: histogram + name: coredns_dns_request_duration_seconds scope: - name: otelcol/prometheusreceiver - version: v0.105.0 + name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver + version: v0.110.0 diff --git a/helm-charts/splunk-otel-collector/Chart.yaml b/helm-charts/splunk-otel-collector/Chart.yaml index 45d45dfaa..b838e378d 100644 --- a/helm-charts/splunk-otel-collector/Chart.yaml +++ b/helm-charts/splunk-otel-collector/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: splunk-otel-collector -version: 0.105.5 -appVersion: 0.105.0 +version: 0.113.0 +appVersion: 0.113.0 description: Splunk OpenTelemetry Collector for Kubernetes icon: https://github.com/signalfx/splunk-otel-collector-chart/tree/main/splunk.png type: application @@ -17,7 +17,7 @@ keywords: maintainers: - name: dmitryax - name: jvoravong - - name: emaderer + - name: atoulme dependencies: # Subchart Notes: # - Avoid uppercase letters in names/aliases, they cause install failure due to subchart resource naming @@ -28,7 +28,7 @@ dependencies: repository: https://charts.jetstack.io condition: certmanager.enabled - name: opentelemetry-operator - version: 0.49.1 + version: 0.71.2 alias: operator repository: https://open-telemetry.github.io/opentelemetry-helm-charts condition: operator.enabled diff --git a/helm-charts/splunk-otel-collector/templates/config/_common.tpl b/helm-charts/splunk-otel-collector/templates/config/_common.tpl index 5e6635b66..fd709415d 100644 --- 
a/helm-charts/splunk-otel-collector/templates/config/_common.tpl +++ b/helm-charts/splunk-otel-collector/templates/config/_common.tpl @@ -10,13 +10,15 @@ memory_limiter: {{- end }} {{/* -Common config for the otel-collector sapm exporter +Common config for the otel-collector otlphttp exporter */}} -{{- define "splunk-otel-collector.otelSapmExporter" -}} +{{- define "splunk-otel-collector.otlpHttpExporter" -}} {{- if (eq (include "splunk-otel-collector.tracesEnabled" .) "true") }} -sapm: - endpoint: {{ include "splunk-otel-collector.o11yIngestUrl" . }}/v2/trace - access_token: ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} +otlphttp: + metrics_endpoint: {{ include "splunk-otel-collector.o11yIngestUrl" . }}/v2/datapoint/otlp + traces_endpoint: {{ include "splunk-otel-collector.o11yIngestUrl" . }}/v2/trace/otlp + headers: + "X-SF-Token": ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} {{- end }} {{- end }} @@ -209,6 +211,36 @@ k8sattributes: {{- include "splunk-otel-collector.addExtraLabels" . | nindent 6 }} {{- end }} {{- end }} +{{- define "splunk-otel-collector.k8sClusterReceiverAttributesProcessor" -}} +k8sattributes/clusterReceiver: + pod_association: + - sources: + - from: resource_attribute + name: k8s.namespace.name + - sources: + - from: resource_attribute + name: k8s.node.name + extract: + metadata: + - k8s.namespace.name + - k8s.node.name + - k8s.pod.name + - k8s.pod.uid + - container.id + - container.image.name + - container.image.tag + {{- if eq (include "splunk-otel-collector.splunkPlatformEnabled" .) "true"}} + annotations: + - key: splunk.com/sourcetype + from: pod + - key: splunk.com/index + tag_name: com.splunk.index + from: namespace + - key: splunk.com/index + tag_name: com.splunk.index + from: pod + {{- end}} +{{- end }} {{/* Common config for K8s attributes processor adding k8s metadata to metrics resource attributes. 
@@ -404,6 +436,9 @@ splunk_hec/platform_metrics: token: "${SPLUNK_PLATFORM_HEC_TOKEN}" index: {{ .Values.splunkPlatform.metricsIndex | quote }} source: {{ .Values.splunkPlatform.source | quote }} + {{- if .Values.splunkPlatform.sourcetype }} + sourcetype: {{ .Values.splunkPlatform.sourcetype | quote }} + {{- end }} max_idle_conns: {{ .Values.splunkPlatform.maxConnections }} max_idle_conns_per_host: {{ .Values.splunkPlatform.maxConnections }} disable_compression: {{ .Values.splunkPlatform.disableCompression }} @@ -445,6 +480,9 @@ splunk_hec/platform_traces: token: "${SPLUNK_PLATFORM_HEC_TOKEN}" index: {{ .Values.splunkPlatform.tracesIndex | quote }} source: {{ .Values.splunkPlatform.source | quote }} + {{- if .Values.splunkPlatform.sourcetype }} + sourcetype: {{ .Values.splunkPlatform.sourcetype | quote }} + {{- end }} max_idle_conns: {{ .Values.splunkPlatform.maxConnections }} max_idle_conns_per_host: {{ .Values.splunkPlatform.maxConnections }} disable_compression: {{ .Values.splunkPlatform.disableCompression }} @@ -521,6 +559,5 @@ prometheus/{{ $receiver }}: - __name__ scrape_interval: 10s static_configs: - - targets: - - "${K8S_POD_IP}:8889" + - targets: [localhost:8889] {{- end }} diff --git a/helm-charts/splunk-otel-collector/templates/config/_otel-agent.tpl b/helm-charts/splunk-otel-collector/templates/config/_otel-agent.tpl index 710c37a71..501cb2771 100644 --- a/helm-charts/splunk-otel-collector/templates/config/_otel-agent.tpl +++ b/helm-charts/splunk-otel-collector/templates/config/_otel-agent.tpl @@ -40,16 +40,31 @@ receivers: endpoint: 0.0.0.0:8006 {{- end }} + # Placeholder receiver needed for discovery mode + nop: + # Prometheus receiver scraping metrics from the pod itself {{- include "splunk-otel-collector.prometheusInternalMetrics" "agent" | nindent 2}} {{- if (eq (include "splunk-otel-collector.metricsEnabled" .) 
"true") }} hostmetrics: collection_interval: 10s + {{- if not .Values.isWindows }} + root_path: "/hostfs" + {{- end }} scrapers: cpu: disk: filesystem: + # Collect metrics from the root filesystem only to avoid scraping errors since the collector + # doesn't have access to all filesystems on the host by default. To collect metrics from + # other devices, ensure that they are mounted to the collector container using + # agent.extraVolumeMounts and agent.extraVolumes helm values options and override this list + # using agent.config.hostmetrics.filesystem.include_mount_points.mount_points helm value. + include_mount_points: + match_type: strict + mount_points: + - "/" memory: network: # System load average metrics https://en.wikipedia.org/wiki/Load_(computing) @@ -667,6 +682,8 @@ processors: {{- include "splunk-otel-collector.otelMemoryLimiterConfig" . | nindent 2 }} batch: + metadata_keys: + - X-SF-Token # Resource detection processor is configured to override all host and cloud # attributes because OTel Collector Agent is the source of truth for all host @@ -775,7 +792,7 @@ exporters: {{- else }} # If gateway is disabled, data will be sent to directly to backends. {{- if (eq (include "splunk-otel-collector.o11yTracesEnabled" .) "true") }} - {{- include "splunk-otel-collector.otelSapmExporter" . | nindent 2 }} + {{- include "splunk-otel-collector.otlpHttpExporter" . | nindent 2 }} {{- end }} {{- if (eq (include "splunk-otel-collector.o11yLogsOrProfilingEnabled" .) "true") }} splunk_hec/o11y: @@ -821,10 +838,21 @@ exporters: send_otlp_histograms: true {{- end }} + # To send entities (applicable only if discovery mode is enabled) + otlphttp/entities: + logs_endpoint: {{ include "splunk-otel-collector.o11yIngestUrl" . 
}}/v3/event + headers: + "X-SF-Token": ${SPLUNK_OBSERVABILITY_ACCESS_TOKEN} + service: telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 extensions: {{- if and (eq (include "splunk-otel-collector.logsEnabled" .) "true") (eq .Values.logsEngine "otel") }} - file_storage @@ -952,7 +980,7 @@ service: - otlp {{- else }} {{- if (eq (include "splunk-otel-collector.o11yTracesEnabled" .) "true") }} - - sapm + - otlphttp {{- end }} {{- if (eq (include "splunk-otel-collector.platformTracesEnabled" .) "true") }} - splunk_hec/platform_traces @@ -1053,6 +1081,16 @@ service: exporters: - signalfx/histograms {{- end }} + + logs/entities: + # Receivers are added dinamically if discovery mode is enabled + receivers: [nop] + processors: + - memory_limiter + - batch + - resourcedetection + - resource + exporters: [otlphttp/entities] {{- end }} {{/* Discovery properties for the otel-collector agent diff --git a/helm-charts/splunk-otel-collector/templates/config/_otel-collector.tpl b/helm-charts/splunk-otel-collector/templates/config/_otel-collector.tpl index 232edfb41..16a31b00c 100644 --- a/helm-charts/splunk-otel-collector/templates/config/_otel-collector.tpl +++ b/helm-charts/splunk-otel-collector/templates/config/_otel-collector.tpl @@ -44,6 +44,8 @@ processors: {{- include "splunk-otel-collector.otelMemoryLimiterConfig" . | nindent 2 }} batch: + metadata_keys: + - X-SF-Token {{- include "splunk-otel-collector.resourceDetectionProcessor" . | nindent 2 }} {{- if eq (include "splunk-otel-collector.autoDetectClusterName" .) "true" }} @@ -104,7 +106,7 @@ exporters: {{- end }} {{- if (eq (include "splunk-otel-collector.o11yTracesEnabled" .) "true") }} - {{- include "splunk-otel-collector.otelSapmExporter" . | nindent 2 }} + {{- include "splunk-otel-collector.otlpHttpExporter" . 
| nindent 2 }} sending_queue: num_consumers: 32 {{- end }} @@ -135,7 +137,12 @@ exporters: service: telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 extensions: - health_check - zpages @@ -168,7 +175,7 @@ service: {{- end }} exporters: {{- if (eq (include "splunk-otel-collector.o11yTracesEnabled" .) "true") }} - - sapm + - otlphttp {{- end }} {{- if (eq (include "splunk-otel-collector.platformTracesEnabled" .) "true") }} - splunk_hec/platform_traces diff --git a/helm-charts/splunk-otel-collector/templates/config/_otel-k8s-cluster-receiver-config.tpl b/helm-charts/splunk-otel-collector/templates/config/_otel-k8s-cluster-receiver-config.tpl index ede63ac5e..56df2992e 100644 --- a/helm-charts/splunk-otel-collector/templates/config/_otel-k8s-cluster-receiver-config.tpl +++ b/helm-charts/splunk-otel-collector/templates/config/_otel-k8s-cluster-receiver-config.tpl @@ -117,6 +117,14 @@ processors: - set(resource.attributes["com.splunk.sourcetype"], Concat(["kube:object:", attributes["k8s.resource.name"]], "")) {{- end }} + {{- if or + (and $clusterReceiver.eventsEnabled (eq (include "splunk-otel-collector.logsEnabled" .) "true")) + (and (eq (include "splunk-otel-collector.objectsEnabled" .) "true") (eq (include "splunk-otel-collector.logsEnabled" .) "true")) + (eq (include "splunk-otel-collector.o11yInfraMonEventsEnabled" .) "true") + }} + {{- include "splunk-otel-collector.k8sClusterReceiverAttributesProcessor" . | nindent 2 }} + {{- end }} + # Resource attributes specific to the collector itself. resource/add_collector_k8s: attributes: @@ -204,7 +212,12 @@ exporters: service: telemetry: metrics: - address: 0.0.0.0:8889 + readers: + - pull: + exporter: + prometheus: + host: localhost + port: 8889 {{- if eq (include "splunk-otel-collector.distribution" .) 
"eks/fargate" }} extensions: [health_check, k8s_observer] {{- else }} @@ -290,6 +303,7 @@ service: {{- if .Values.environment }} - resource/add_environment {{- end }} + - k8sattributes/clusterReceiver exporters: {{- if (eq (include "splunk-otel-collector.o11yLogsEnabled" .) "true") }} - splunk_hec/o11y @@ -312,6 +326,7 @@ service: {{- if .Values.environment }} - resource/add_environment {{- end }} + - k8sattributes/clusterReceiver exporters: {{- if (eq (include "splunk-otel-collector.o11yLogsEnabled" .) "true") }} - splunk_hec/o11y @@ -333,6 +348,7 @@ service: {{- if .Values.clusterName }} - resource/add_event_k8s {{- end }} + - k8sattributes/clusterReceiver exporters: - signalfx {{- end }} diff --git a/helm-charts/splunk-otel-collector/templates/daemonset.yaml b/helm-charts/splunk-otel-collector/templates/daemonset.yaml index 470425547..0325315af 100644 --- a/helm-charts/splunk-otel-collector/templates/daemonset.yaml +++ b/helm-charts/splunk-otel-collector/templates/daemonset.yaml @@ -76,7 +76,7 @@ spec: {{- end }} {{- with .Values.tolerations }} tolerations: - {{ toYaml . | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} {{- if and (eq (include "splunk-otel-collector.logsEnabled" .) "true") (not .Values.isWindows) (not $agent.skipInitContainers) }} initContainers: @@ -315,28 +315,6 @@ spec: name: {{ include "splunk-otel-collector.secret" . }} key: splunk_platform_hec_token {{- end }} - {{- if eq (include "splunk-otel-collector.metricsEnabled" .) 
"true" }} - # Env variables for host metrics receiver - - name: HOST_PROC - value: {{ .Values.isWindows | ternary "C:\\hostfs\\proc" "/hostfs/proc" }} - - name: HOST_SYS - value: {{ .Values.isWindows | ternary "C:\\hostfs\\sys" "/hostfs/sys" }} - - name: HOST_ETC - value: {{ .Values.isWindows | ternary "C:\\hostfs\\etc" "/hostfs/etc" }} - - name: HOST_VAR - value: {{ .Values.isWindows | ternary "C:\\hostfs\\var" "/hostfs/var" }} - - name: HOST_RUN - value: {{ .Values.isWindows | ternary "C:\\hostfs\\run" "/hostfs/run" }} - - name: HOST_DEV - value: {{ .Values.isWindows | ternary "C:\\hostfs\\dev" "/hostfs/dev" }} - {{- if not .Values.isWindows }} - # until https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5879 - # is resolved fall back to previous gopsutil mountinfo path: - # https://github.com/shirou/gopsutil/issues/1271 - - name: HOST_PROC_MOUNTINFO - value: /proc/self/mountinfo - {{- end }} - {{- end }} {{- with $agent.extraEnvs }} {{- . | toYaml | nindent 10 }} {{- end }} diff --git a/helm-charts/splunk-otel-collector/templates/operator/instrumentation.yaml b/helm-charts/splunk-otel-collector/templates/operator/instrumentation.yaml index 3d4cfea47..859021146 100644 --- a/helm-charts/splunk-otel-collector/templates/operator/instrumentation.yaml +++ b/helm-charts/splunk-otel-collector/templates/operator/instrumentation.yaml @@ -13,6 +13,9 @@ metadata: release: {{ .Release.Name }} heritage: {{ .Release.Service }} app.kubernetes.io/component: otel-operator + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "5" spec: exporter: endpoint: {{ include "splunk-otel-collector.operator.instrumentation-exporter-endpoint" . }} diff --git a/helm-charts/splunk-otel-collector/values.yaml b/helm-charts/splunk-otel-collector/values.yaml index 4f7d94ff7..1b3580e96 100644 --- a/helm-charts/splunk-otel-collector/values.yaml +++ b/helm-charts/splunk-otel-collector/values.yaml @@ -47,7 +47,7 @@ splunkPlatform: # Optional. 
Default value for `source` field. source: "kubernetes" # Optional. Default value for `sourcetype` field. For container logs, it will - # be container name. + # be container name. For metrics and traces it will default to "httpevent". sourcetype: "" # Maximum HTTP connections to use simultaneously when sending data. maxConnections: 200 @@ -173,7 +173,7 @@ splunkObservability: # otel-collector agent for further processing. # - `otel`: utilize native OpenTelemetry log collection. # -# `fluentd` will be deprecated soon, so it's recommended to use `otel` instead. +# `fluentd` will be deprecated in October 2025, so it's recommended to use `otel` instead. ################################################################################ logsEngine: otel @@ -404,7 +404,7 @@ agent: service: # create a service for the agents with a local internalTrafficPolicy # so that agent pods can be discovered via dns etc - enabled: false + enabled: true # hostNetwork schedules the pod with the host's network namespace. # Disabling this value will affect monitoring of some control plane @@ -657,7 +657,7 @@ logsCollection: ################################################################################ # Fluentd sidecar configuration for logs collection. # Applicable only if "logsEngine: fluentd". -# Fluentd will be deprecated soon, so it's recommended to use "logsEngine: otel" instead. +# Fluentd logs engine is now deprecated and will reach End Of Support in October 2025, it is strongly recommended to use "logsEngine: otel" instead. 
################################################################################ fluentd: @@ -958,6 +958,8 @@ image: otelcol: # The registry and name of the opentelemetry collector image to pull repository: quay.io/signalfx/splunk-otel-collector + # For the FIPS-140 enabled version, use this repository instead: + # repository: quay.io/signalfx/splunk-otel-collector-fips # The tag of the Splunk OTel Collector image, default value is the chart appVersion tag: "" # The policy that specifies when the user wants the opentelemetry collector images to be pulled @@ -1009,19 +1011,27 @@ secret: # Specifies whether secret provided by user should be validated. validateSecret: true -# This default tolerations allow the daemonset to be deployed on control-plane -# nodes, so that we can also collect logs and metrics from those nodes. +# The tolerations for deploying the agent collector daemonset. By default, it targets control-plane, worker, +# and k8s distribution-specific nodes (infrastructure or system) to ensure logs and metrics collection from nodes. tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule + operator: Exists - key: node-role.kubernetes.io/control-plane effect: NoSchedule + operator: Exists + - key: kubernetes.io/system-node + effect: NoSchedule + operator: Exists + - key: node-role.kubernetes.io/infra + effect: NoSchedule + operator: Exists -# Defines which nodes should be selected to deploy the o11y collector daemonset. +# Defines which nodes should be selected to deploy the agent collector daemonset. nodeSelector: {} terminationGracePeriodSeconds: 600 -# Defines node affinity to restrict deployment of the o11y collector daemonset. +# Defines node affinity to restrict deployment of the agent collector daemonset. affinity: {} # Defines priorityClassName to assign a priority class to pods. 
@@ -1175,6 +1185,11 @@ operator: issuerAnnotations: "helm.sh/hook": post-install,post-upgrade "helm.sh/hook-weight": "1" + # Collector deployment via the operator is not supported at this time. + # The collector image repository is specified here to meet operator subchart constraints. + manager: + collectorImage: + repository: quay.io/signalfx/splunk-otel-collector # The default Splunk Instrumentation object deployed when operator.enabled=true. # For more details see: @@ -1215,19 +1230,19 @@ instrumentation: # - env: (Optional) Allows you to add any additional environment variables. java: repository: ghcr.io/signalfx/splunk-otel-java/splunk-otel-java - tag: v2.7.0 + tag: v2.10.0 # env: # - name: JAVA_ENV_VAR # value: java_value nodejs: repository: ghcr.io/signalfx/splunk-otel-js/splunk-otel-js - tag: v2.11.0 + tag: v2.15.0 # env: # - name: NODEJS_ENV_VAR # value: nodejs_value dotnet: repository: ghcr.io/signalfx/splunk-otel-dotnet/splunk-otel-dotnet - tag: v1.6.0 + tag: v1.8.0 env: - name: OTEL_DOTNET_AUTO_PLUGINS value: Splunk.OpenTelemetry.AutoInstrumentation.Plugin,Splunk.OpenTelemetry.AutoInstrumentation diff --git a/test/unittests/operator_customized_test.yaml b/test/unittests/operator_customized_test.yaml index a03e608aa..f6701744a 100644 --- a/test/unittests/operator_customized_test.yaml +++ b/test/unittests/operator_customized_test.yaml @@ -10,7 +10,7 @@ tests: count: 1 - equal: path: spec.exporter.endpoint - value: http://RELEASE-NAME-splunk-otel-collector:4317 + value: http://RELEASE-NAME-splunk-otel-collector-agent.NAMESPACE.svc.cluster.local:4317 - contains: path: spec.env content: @@ -25,7 +25,7 @@ tests: path: spec.nodejs.env content: name: OTEL_EXPORTER_OTLP_ENDPOINT - value: "http://$(NODEJS_OTEL_AGENT):4318" + value: "http://SOME-OTHER-RELEASE-NAME-splunk-otel-collector-agent.NAMESPACE.svc.cluster.local:4318" - contains: path: spec.nodejs.env content: diff --git a/test/unittests/operator_default_test.yaml b/test/unittests/operator_default_test.yaml 
index 79e8bf064..fdc6dd439 100644 --- a/test/unittests/operator_default_test.yaml +++ b/test/unittests/operator_default_test.yaml @@ -10,7 +10,7 @@ tests: count: 1 - equal: path: spec.exporter.endpoint - value: http://$(SPLUNK_OTEL_AGENT):4317 + value: http://RELEASE-NAME-splunk-otel-collector-agent.NAMESPACE.svc.cluster.local:4317 - contains: path: spec.propagators content: tracecontext @@ -20,14 +20,6 @@ tests: - contains: path: spec.propagators content: b3 - - contains: - path: spec.env - content: - name: SPLUNK_OTEL_AGENT - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.hostIP - contains: path: spec.dotnet.env content: @@ -37,17 +29,9 @@ tests: path: spec.dotnet.env content: name: OTEL_EXPORTER_OTLP_ENDPOINT - value: http://$(SPLUNK_OTEL_AGENT):4318 - - contains: - path: spec.env - content: - name: SPLUNK_OTEL_AGENT - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: status.hostIP + value: http://RELEASE-NAME-splunk-otel-collector-agent.NAMESPACE.svc.cluster.local:4318 - contains: path: spec.java.env content: name: OTEL_EXPORTER_OTLP_ENDPOINT - value: http://$(SPLUNK_OTEL_AGENT):4318 + value: http://RELEASE-NAME-splunk-otel-collector-agent.NAMESPACE.svc.cluster.local:4318 diff --git a/test/unittests/service_account_test.yaml b/test/unittests/service_account_test.yaml new file mode 100644 index 000000000..0a4344944 --- /dev/null +++ b/test/unittests/service_account_test.yaml @@ -0,0 +1,112 @@ +suite: splunk-otel-collector.serviceAccount +values: + - ./values/basic.yaml +templates: + - serviceAccount.yaml + - deployment-cluster-receiver.yaml + - configmap-cluster-receiver.yaml + - daemonset.yaml + - configmap-fluentd.yaml + - configmap-agent.yaml +release: + name: test-release +tests: + - it: should render serviceaccount correctly + set: + image: + imagePullSecrets: + - secret1 + - secret2 + serviceAccount: + create: true + annotations: + key1: value1 + featureGates: + explicitMountServiceAccountToken: true # automount should be false + template: 
serviceAccount.yaml + asserts: + - isKind: + of: ServiceAccount + template: serviceAccount.yaml + - equal: + path: metadata.name + value: "test-release-splunk-otel-collector" + - equal: + path: metadata.annotations + value: + key1: value1 + - equal: + path: automountServiceAccountToken + value: false + - equal: + path: imagePullSecrets + value: + - name: secret1 + - name: secret2 + - it: should add default serviceAccountName to deployment and daemonset + set: + serviceAccount: + create: true + templates: + - deployment-cluster-receiver.yaml + - daemonset.yaml + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: "test-release-splunk-otel-collector" + template: deployment-cluster-receiver.yaml + - equal: + path: spec.template.spec.serviceAccountName + value: "test-release-splunk-otel-collector" + template: daemonset.yaml + - it: should not render serviceaccount + set: + serviceAccount: + create: false + template: serviceAccount.yaml + asserts: + - hasDocuments: + count: 0 + - it: should set custom service account name correctly in serviceaccount, deployment and daemonset + set: + serviceAccount: + create: true + name: custom-service-account + templates: + - deployment-cluster-receiver.yaml + - daemonset.yaml + - serviceAccount.yaml + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: custom-service-account + template: deployment-cluster-receiver.yaml + - equal: + path: spec.template.spec.serviceAccountName + value: custom-service-account + template: daemonset.yaml + - equal: + path: metadata.name + value: custom-service-account + template: serviceAccount.yaml + - it: should set custom service account name correctly in deployment and daemonset with no serviceaccount + set: + serviceAccount: + create: false + name: custom-service-account + templates: + - deployment-cluster-receiver.yaml + - daemonset.yaml + - serviceAccount.yaml + asserts: + - equal: + path: spec.template.spec.serviceAccountName + value: 
custom-service-account + template: deployment-cluster-receiver.yaml + - equal: + path: spec.template.spec.serviceAccountName + value: custom-service-account + template: daemonset.yaml + - hasDocuments: + count: 0 + template: serviceAccount.yaml \ No newline at end of file diff --git a/test/unittests/values/operator_customized.yaml b/test/unittests/values/operator_customized.yaml index 743c597fd..1ea1d9a31 100644 --- a/test/unittests/values/operator_customized.yaml +++ b/test/unittests/values/operator_customized.yaml @@ -20,7 +20,7 @@ instrumentation: nodejs: env: - name: OTEL_EXPORTER_OTLP_ENDPOINT - value: http://$(NODEJS_OTEL_AGENT):4318 + value: http://SOME-OTHER-RELEASE-NAME-splunk-otel-collector-agent.NAMESPACE.svc.cluster.local:4318 - name: SPLUNK_PROFILER_ENABLED value: "false" - name: SPLUNK_PROFILER_MEMORY_ENABLED diff --git a/tools/splunk_kubernetes_debug_info.sh b/tools/splunk_kubernetes_debug_info.sh new file mode 100755 index 000000000..826ca55f1 --- /dev/null +++ b/tools/splunk_kubernetes_debug_info.sh @@ -0,0 +1,343 @@ +#!/bin/bash + +# Description: +# This script collects debugging information from a Kubernetes cluster. +# It retrieves networking, firewall, security policies, custom resource definitions (CRDs), +# and logs from specified pods. The outputs are saved to files for each namespace and object type. +# This helps in diagnosing and troubleshooting cluster configurations. +# Finally, it compresses all the collected files into a ZIP archive. +# +# Input Parameters: +# - NAMESPACES: Comma-separated list of namespaces to collect data from. If not specified, the script collects data from all namespaces. +# - K8S_OBJECT_NAME_FILTER: Filter for Kubernetes object names (default: 'splunk|collector|otel|certmanager|test|sck|sock'). +# +# Usage: +# 1. Ensure you have `kubectl`, `yq`, and `helm` installed and configured to access your Kubernetes cluster. +# 2. Save the script to a file called `splunk_kubernetes_debug_info.sh`. +# 3. 
Make the script executable: +# chmod +x splunk_kubernetes_debug_info.sh +# 4. Run the script: +# 4.1. Via Terminal and Curl: +# curl -s https://raw.githubusercontent.com/signalfx/splunk-otel-collector-chart/main/tools/splunk_kubernetes_debug_info.sh | bash +# 4.2. Via Terminal and Local Code: +# ./splunk_kubernetes_debug_info.sh [NAMESPACES=namespace1,namespace2,...] [K8S_OBJECT_NAME_FILTER=splunk|collector|otel|certmanager|test|sck|sock|customname] +# Note: If no namespaces are specified, the script will collect information from all namespaces. +# Sensitive Data Handling: +# The script attempts to redact sensitive information where possible, including tokens, passwords, and certificates. +# However, users should review the files for any sensitive data before sharing. +# +# Objects Scraped: +# - Pod logs for agent, cluster-receiver, certmanager, operator, gateway, splunk pods +# - Deployments, daemonsets, Helm releases matching K8S_OBJECT_NAME_FILTER +# - NetworkPolicies, Services, Ingress resources, Endpoints, Roles, RoleBindings, Security contexts +# - OpenTelemetry Instrumentation objects +# - Custom Resource Definitions (CRDs), Pod Security Policies (PSPs), Security Context Constraints (SCCs) +# - Cert-manager related objects +# - MutatingWebhookConfiguration objects + +# Helper function to write output to a file +write_output() { + local output="$1" + local file_name="$2" + local cmd="$3" + + # Check if output is empty, starts with "No resources found", or "error" + if [[ -z "$output" || "$output" == "No resources found"* || "$output" == "error"* || "$output" == "Error"* ]]; then + echo "[$(date)] Skipping $file_name: $output" >> "$temp_dir/errors.txt" + return + fi + + # Check if output is in YAML format + if echo "$output" | yq eval '.' 
- > /dev/null 2>&1; then + # Check if output contains empty list using yq + if [[ $(echo "$output" | yq eval '.kind' -) == "List" ]] && [[ $(echo "$output" | yq eval '.items | length' -) -eq 0 ]]; then + echo "[$(date)] Skipping $file_name: Empty list" >> "$temp_dir/errors.txt" + return + fi + fi + + # Redact sensitive information + output=$(echo "$output" | awk ' + /BEGIN CERTIFICATE/,/END CERTIFICATE/ { + if (/BEGIN CERTIFICATE/) print; + else if (/END CERTIFICATE/) print; + else print " [CERTIFICATE REDACTED]"; + next; + } + /ca\.crt|client\.crt|client\.key|tls\.crt|tls\.key/ { + print " [SENSITIVE DATA REDACTED]"; + next; + } + /[Tt][Oo][Kk][Ee][Nn]/ { + print " [TOKEN REDACTED]"; + next; + } + /[Pp][Aa][Ss][Ss][Ww][Oo][Rr][Dd]/ { + print " [PASSWORD REDACTED]"; + next; + } + {print}') + + # Write command and output to file + echo "# Command: $cmd" > "$file_name" + echo "$output" >> "$file_name" +} + +# Function to collect data for a given namespace +collect_data_namespace() { + local ns=$1 + + object_types=("configmaps" "daemonsets" "deployments" "endpoints" "events" "ingress" "jobs" "networkpolicies" "otelinst" "rolebindings" "roles" "svc") + for type in "${object_types[@]}"; do + stdbuf -oL echo "Collecting $type data for $ns namespace with $k8s_object_name_filter name filter" + if [[ "$type" == "deployments" || "$type" == "daemonsets" || "$type" == "configmaps" ]]; then + kubectl get "$type" -n "$ns" -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep -E "$k8s_object_name_filter" | while read object; do + cmd="kubectl get $type $object -n $ns -o yaml" + output=$(eval "$cmd") + write_output "$output" "$temp_dir/namespace_${ns}_${type}_${object}.yaml" "$cmd" + done + else + kubectl get "$type" -n "$ns" -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | while read object; do + cmd="kubectl get $type $object -n $ns -o yaml" + output=$(eval "$cmd") + write_output "$output" "$temp_dir/namespace_${ns}_${type}_${object}.yaml" "$cmd" + done + 
fi + done + + # Collect logs from specific pods + pods=$(kubectl get pods -n "$ns" -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep -E "$k8s_object_name_filter") + # Collect logs from a single agent pod + agent_pod=$(echo "$pods" | grep "agent" | head -n 1) + if [ -n "$agent_pod" ]; then + cmd="kubectl logs $agent_pod -n $ns" + output=$(eval "$cmd") + write_output "$output" "$temp_dir/namespace_${ns}_logs_pod_${agent_pod}.log" "$cmd" + pods=$(echo "$pods" | grep -v "$agent_pod") + fi + + # Collect logs from a single cluster-receiver pod + cluster_receiver_pod=$(echo "$pods" | grep "cluster-receiver" | head -n 1) + if [ -n "$cluster_receiver_pod" ]; then + cmd="kubectl logs $cluster_receiver_pod -n $ns" + output=$(eval "$cmd") + write_output "$output" "$temp_dir/namespace_${ns}_logs_pod_${cluster_receiver_pod}.log" "$cmd" + pods=$(echo "$pods" | grep -v "$cluster_receiver_pod") + fi + + # Collect logs from all certmanager pods + certmanager_pods=$(echo "$pods" | grep "certmanager") + for pod in $certmanager_pods; do + cmd="kubectl logs $pod -n $ns" + output=$(eval "$cmd") + write_output "$output" "$temp_dir/namespace_${ns}_logs_pod_${pod}.log" "$cmd" + done + pods=$(echo "$pods" | grep -v "certmanager") + + # Collect logs from all operator pods + operator_pods=$(echo "$pods" | grep "operator") + for pod in $operator_pods; do + cmd="kubectl logs $pod -n $ns" + output=$(eval "$cmd") + write_output "$output" "$temp_dir/namespace_${ns}_logs_pod_${pod}.log" "$cmd" + done + pods=$(echo "$pods" | grep -v "operator") + + # Collect logs from a single Splunk pod + splunk_pod=$(kubectl get pods -n "$ns" -l app=splunk -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -n "$splunk_pod" ]; then + echo "Getting logs for pod $splunk_pod in namespace ${ns}" + cmd="kubectl logs -n ${ns} $splunk_pod" + output=$(eval "$cmd") + write_output "$output" "$temp_dir/namespace_${ns}_logs_pod_${splunk_pod}.log" "$cmd" + fi + + # Collect pod spec and logs for specific 
annotations + annotations=( + "instrumentation.opentelemetry.io/inject-java" + "instrumentation.opentelemetry.io/inject-python" + "instrumentation.opentelemetry.io/inject-dotnet" + "instrumentation.opentelemetry.io/inject-go" + "instrumentation.opentelemetry.io/inject-nodejs" + "instrumentation.opentelemetry.io/inject-nginx" + "instrumentation.opentelemetry.io/inject-sdk" + "instrumentation.opentelemetry.io/inject-apache-httpd" + ) + + for annotation in "${annotations[@]}"; do + pod_with_annotation=$(kubectl get pods -n "$ns" -o jsonpath="{range .items[?(@.metadata.annotations['$annotation'])]}{.metadata.name}{'\n'}{end}" | head -n 1) + if [ -n "$pod_with_annotation" ]; then + cmd="kubectl get pod $pod_with_annotation -n $ns -o yaml" + output=$(eval "$cmd") + write_output "$output" "$temp_dir/namespace_${ns}_pod_spec_${pod_with_annotation}.yaml" "$cmd" + cmd="kubectl logs $pod_with_annotation -n $ns" + output=$(eval "$cmd") + write_output "$output" "$temp_dir/namespace_${ns}_logs_pod_${pod_with_annotation}.log" "$cmd" + fi + done +} + +# Function to collect cluster-wide data +collect_data_cluster() { + echo "Collecting cluster-wide data..." 
+ + echo "Basic Cluster Configurations:" >> "$output_file" + echo "Cluster Name: $(kubectl config view --minify -o jsonpath='{.clusters[].name}')" >> "$output_file" + echo "Kubernetes Version:" >> "$output_file" + kubectl version >> "$output_file" + echo "Number of Namespaces:" >> "$output_file" + kubectl get namespaces --no-headers | wc -l >> "$output_file" + echo "Namespaces: $(kubectl get namespaces -o jsonpath='{.items[*].metadata.name}')" >> "$output_file" + echo "Number of Running Nodes:" >> "$output_file" + kubectl get nodes --no-headers | wc -l >> "$output_file" + echo "Number of Running Pods:" >> "$output_file" + kubectl get pods --all-namespaces --field-selector=status.phase=Running --no-headers | wc -l >> "$output_file" + echo "Splunk Related Pods:" >> "$output_file" + kubectl get pods --all-namespaces | (head -n 1 && grep -E "$k8s_object_name_filter") >> "$output_file" + echo "---" >> "$output_file" + + echo "Collecting custom resource definitions..." + cmd="kubectl get crds -o yaml" + output=$(eval "$cmd") + write_output "$output" "$temp_dir/cluster_custom_resource_definitions.yaml" "$cmd" + + echo "Checking for cert-manager installation..." + cert_manager_pods=$(kubectl get pods --all-namespaces -l app=cert-manager --no-headers) + if [ -n "$cert_manager_pods" ]; then + echo "Cert-manager is installed. Collecting related objects..." + cmd="kubectl get Issuers,ClusterIssuers,Certificates,CertificateRequests,Orders,Challenges --all-namespaces -o yaml; kubectl describe Issuers,ClusterIssuers,Certificates,CertificateRequests,Orders,Challenges --all-namespaces" + output=$(eval "$cmd") + write_output "$output" "$temp_dir/cluster_cert_manager_objects.yaml" "$cmd" + fi + + echo "Collecting Helm values for relevant releases..." 
+ helm list -A | grep -E "$k8s_object_name_filter" | awk '{print $1, $2}' | while read release namespace; do + cmd="helm get values $release -n $namespace" + output=$(eval "$cmd") + write_output "$output" "$temp_dir/helm_values_${release}_${namespace}.yaml" "$cmd" + done +} + +collect_cluster_resources() { + # List of cluster-scoped resource types to collect + cluster_object_types=( + "crds" + "psp" + "scc" + "mutatingwebhookconfiguration.admissionregistration.k8s.io" + "validatingwebhookconfiguration.admissionregistration.k8s.io" + ) + + for type in "${cluster_object_types[@]}"; do + echo "Collecting $type cluster-scoped resources..." + + # Fetch each object's name + kubectl get "$type" -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | while read object; do + # Get the API version for this object, fallback to "unknown" + api_version=$(kubectl get "$type" "$object" -o jsonpath='{.apiVersion}' 2>/dev/null || echo "unknown") + api_version=${api_version//\//_} # Sanitize slashes in API version + + # Collect YAML output + cmd="kubectl get $type $object -o yaml" + output=$(eval "$cmd") + write_output "$output" "$temp_dir/cluster_${type//./_}_${api_version}_${object}.yaml" "$cmd" + done + done +} + +# Parse input parameters +namespaces="" +k8s_object_name_filter="splunk|collector|otel|certmanager|test|sck|sock" + +for arg in "$@"; do + case $arg in + NAMESPACES=*) + namespaces="${arg#*=}" + ;; + K8S_OBJECT_NAME_FILTER=*) + k8s_object_name_filter="${arg#*=}" + ;; + *) + echo "Unknown parameter: $arg" + exit 1 + ;; + esac +done + +# Collect data from all namespaces if no namespaces are specified +if [[ -z "$namespaces" ]]; then + # Get all namespaces and convert the string into an array + IFS=' ' read -r -a namespaces_array <<< "$(kubectl get namespaces -o jsonpath='{.items[*].metadata.name}')" +else + # Split the specified namespaces string into an array + IFS=',' read -r -a namespaces_array <<< "$namespaces" +fi + +echo "Namespaces: ${namespaces_array[@]}" 
+echo "Kubernetes object name filter: $k8s_object_name_filter" + +# Create a temporary directory with a unique name +temp_dir=$(mktemp -d -t splunk_kubernetes_debug_info_XXXXXX) +if [[ ! -d "$temp_dir" ]]; then + echo "Failed to create temporary directory" + exit 1 +fi + +# Output file for basic cluster information +output_file="$temp_dir/cluster.txt" + +# Print script start time +script_start_time=$(date +"%Y-%m-%d %H:%M:%S") +echo "Script start time: $script_start_time" +echo "Script start time: $script_start_time" >> "$output_file" + +# Collect cluster instance specific data +collect_data_cluster + +# Collect cluster scoped resources data +collect_cluster_resources + +# Function to manage parallel processing of namespaces +collect_data_namespace_namespaces() { + local parallelism=20 + local pids=() + + for ns in "${namespaces_array[@]}"; do + collect_data_namespace "$ns" & + pids+=($!) + + if [[ ${#pids[@]} -ge $parallelism ]]; then + for pid in "${pids[@]}"; do + wait "$pid" + done + pids=() + fi + done + + # Wait for any remaining background processes to complete + for pid in "${pids[@]}"; do + wait "$pid" + done +} + +# Process namespaces in parallel +collect_data_namespace_namespaces + +# Print script end time +script_end_time=$(date +"%Y-%m-%d %H:%M:%S") +echo "Script end time: $script_end_time" +echo "Script end time: $script_end_time" >> "$output_file" + +# Create a ZIP archive of all the collected YAML files +output_zip="splunk_kubernetes_debug_info_$(date +%Y%m%d_%H%M%S).zip" +echo "Creating ZIP archive: $output_zip" + +# Find and delete empty files before creating the ZIP archive +find "$temp_dir" -type f -empty -delete + +zip -j -r "$output_zip" "$temp_dir" + +# Clean up the temporary directory +rm -rf "$temp_dir" + +echo "Data collection complete. Output files are available in the ZIP archive: $output_zip"