From ef1bce5cfc693a9e1b015b98a96757738eb8aee9 Mon Sep 17 00:00:00 2001 From: Milton Hultgren Date: Wed, 13 Dec 2023 14:28:54 +0100 Subject: [PATCH 001/129] [beat] Set cluster UUID in state metricset correctly (#37419) * [beat] Set cluster UUID in state metricset correctly * Add test * Fix file ending with new line * Fix spacing error --- .../beat/state/_meta/test/uuid_es_output.json | 43 +++++++++ .../test/uuid_es_output_pre_connect.json | 43 +++++++++ .../_meta/test/uuid_monitoring_config.json | 52 +++++++++++ .../_meta/test/uuid_no_monitoring_config.json | 52 +++++++++++ metricbeat/module/beat/state/data.go | 11 +-- metricbeat/module/beat/state/data_test.go | 89 +++++++++++++++++++ 6 files changed, 283 insertions(+), 7 deletions(-) create mode 100644 metricbeat/module/beat/state/_meta/test/uuid_es_output.json create mode 100644 metricbeat/module/beat/state/_meta/test/uuid_es_output_pre_connect.json create mode 100644 metricbeat/module/beat/state/_meta/test/uuid_monitoring_config.json create mode 100644 metricbeat/module/beat/state/_meta/test/uuid_no_monitoring_config.json diff --git a/metricbeat/module/beat/state/_meta/test/uuid_es_output.json b/metricbeat/module/beat/state/_meta/test/uuid_es_output.json new file mode 100644 index 000000000000..3cabff1ad069 --- /dev/null +++ b/metricbeat/module/beat/state/_meta/test/uuid_es_output.json @@ -0,0 +1,43 @@ +{ + "beat": { + "name": "Shaunaks-MBP-2" + }, + "host": { + "architecture": "x86_64", + "hostname": "Shaunaks-MBP-2", + "id": "EF6274EA-462F-5316-A14A-850E7BFD8126", + "os": { + "build": "18F132", + "family": "darwin", + "kernel": "18.6.0", + "name": "Mac OS X", + "platform": "darwin", + "version": "10.14.5" + } + }, + "management": { + "enabled": false + }, + "module": { + "count": 3, + "names": [ + "system" + ] + }, + "output": { + "name": "elasticsearch" + }, + "outputs": { + "elasticsearch": { + "cluster_uuid": "uuid_from_es_output" + } + }, + "queue": { + "name": "mem" + }, + "service": { + "id": 
"1f0c187b-f2ef-4950-b9cc-dd6864b9191a", + "name": "metricbeat", + "version": "8.0.0" + } +} diff --git a/metricbeat/module/beat/state/_meta/test/uuid_es_output_pre_connect.json b/metricbeat/module/beat/state/_meta/test/uuid_es_output_pre_connect.json new file mode 100644 index 000000000000..557b4163d359 --- /dev/null +++ b/metricbeat/module/beat/state/_meta/test/uuid_es_output_pre_connect.json @@ -0,0 +1,43 @@ +{ + "beat": { + "name": "Shaunaks-MBP-2" + }, + "host": { + "architecture": "x86_64", + "hostname": "Shaunaks-MBP-2", + "id": "EF6274EA-462F-5316-A14A-850E7BFD8126", + "os": { + "build": "18F132", + "family": "darwin", + "kernel": "18.6.0", + "name": "Mac OS X", + "platform": "darwin", + "version": "10.14.5" + } + }, + "management": { + "enabled": false + }, + "module": { + "count": 3, + "names": [ + "system" + ] + }, + "output": { + "name": "elasticsearch" + }, + "outputs": { + "elasticsearch": { + "cluster_uuid": "" + } + }, + "queue": { + "name": "mem" + }, + "service": { + "id": "1f0c187b-f2ef-4950-b9cc-dd6864b9191a", + "name": "metricbeat", + "version": "8.0.0" + } +} diff --git a/metricbeat/module/beat/state/_meta/test/uuid_monitoring_config.json b/metricbeat/module/beat/state/_meta/test/uuid_monitoring_config.json new file mode 100644 index 000000000000..1464e65b6402 --- /dev/null +++ b/metricbeat/module/beat/state/_meta/test/uuid_monitoring_config.json @@ -0,0 +1,52 @@ +{ + "beat": { + "name": "MacBook-Pro" + }, + "host": { + "architecture": "arm64", + "hostname": "MacBook-Pro", + "id": "B0C1E948-241E-53FD-A6A3-0D0F352403AF", + "os": { + "build": "23B92", + "family": "darwin", + "kernel": "23.1.0", + "name": "macOS", + "platform": "darwin", + "version": "14.1.2" + } + }, + "input": { + "count": 1, + "names": [ + "log" + ] + }, + "management": { + "enabled": false + }, + "module": { + "count": 0, + "names": [] + }, + "monitoring": { + "cluster_uuid": "uuid_from_monitoring_config" + }, + "output": { + "batch_size": 2048, + "clients": 1, + "name": 
"logstash" + }, + "outputs": { + "elasticsearch": { + "cluster_uuid": "" + } + }, + "queue": { + "name": "mem" + }, + "service": { + "id": "8d6af17b-cf55-4029-bd22-64cd538acbd0", + "name": "filebeat", + "version": "8.13.0" + } +} diff --git a/metricbeat/module/beat/state/_meta/test/uuid_no_monitoring_config.json b/metricbeat/module/beat/state/_meta/test/uuid_no_monitoring_config.json new file mode 100644 index 000000000000..c204e26d2491 --- /dev/null +++ b/metricbeat/module/beat/state/_meta/test/uuid_no_monitoring_config.json @@ -0,0 +1,52 @@ +{ + "beat": { + "name": "MacBook-Pro" + }, + "host": { + "architecture": "arm64", + "hostname": "MacBook-Pro", + "id": "B0C1E948-241E-53FD-A6A3-0D0F352403AF", + "os": { + "build": "23B92", + "family": "darwin", + "kernel": "23.1.0", + "name": "macOS", + "platform": "darwin", + "version": "14.1.2" + } + }, + "input": { + "count": 1, + "names": [ + "log" + ] + }, + "management": { + "enabled": false + }, + "module": { + "count": 0, + "names": [] + }, + "monitoring": { + "cluster_uuid": "" + }, + "output": { + "batch_size": 2048, + "clients": 1, + "name": "logstash" + }, + "outputs": { + "elasticsearch": { + "cluster_uuid": "" + } + }, + "queue": { + "name": "mem" + }, + "service": { + "id": "8d6af17b-cf55-4029-bd22-64cd538acbd0", + "name": "filebeat", + "version": "8.13.0" + } +} diff --git a/metricbeat/module/beat/state/data.go b/metricbeat/module/beat/state/data.go index b555c84bd402..5d8231046043 100644 --- a/metricbeat/module/beat/state/data.go +++ b/metricbeat/module/beat/state/data.go @@ -77,22 +77,19 @@ func eventMapping(r mb.ReporterV2, info beat.Info, content []byte, isXpack bool) return fmt.Errorf("failure parsing Beat's State API response: %w", err) } - event.MetricSetFields, _ = schema.Apply(data) - clusterUUID := getMonitoringClusterUUID(data) if clusterUUID == "" { if isOutputES(data) { clusterUUID = getClusterUUID(data) - if clusterUUID != "" { - event.ModuleFields.Put("elasticsearch.cluster.id", clusterUUID) - 
if event.MetricSetFields != nil { - event.MetricSetFields.Put("cluster.uuid", clusterUUID) - } + if clusterUUID == "" { + return nil } } } + event.ModuleFields.Put("elasticsearch.cluster.id", clusterUUID) + event.MetricSetFields, _ = schema.Apply(data) if event.MetricSetFields != nil { diff --git a/metricbeat/module/beat/state/data_test.go b/metricbeat/module/beat/state/data_test.go index 6123b7539b7e..4d94a3f24da9 100644 --- a/metricbeat/module/beat/state/data_test.go +++ b/metricbeat/module/beat/state/data_test.go @@ -53,3 +53,92 @@ func TestEventMapping(t *testing.T) { require.Equal(t, 0, len(reporter.GetErrors()), f) } } + +func TestUuidFromEsOutput(t *testing.T) { + reporter := &mbtest.CapturingReporterV2{} + + info := beat.Info{ + UUID: "1234", + Beat: "testbeat", + } + + input, err := ioutil.ReadFile("./_meta/test/uuid_es_output.json") + require.NoError(t, err) + + err = eventMapping(reporter, info, input, true) + require.NoError(t, err) + require.True(t, len(reporter.GetEvents()) >= 1) + require.Equal(t, 0, len(reporter.GetErrors())) + + event := reporter.GetEvents()[0] + + uuid, err := event.ModuleFields.GetValue("elasticsearch.cluster.id") + require.NoError(t, err) + + require.Equal(t, "uuid_from_es_output", uuid) +} + +func TestNoEventIfEsOutputButNoUuidYet(t *testing.T) { + reporter := &mbtest.CapturingReporterV2{} + + info := beat.Info{ + UUID: "1234", + Beat: "testbeat", + } + + input, err := ioutil.ReadFile("./_meta/test/uuid_es_output_pre_connect.json") + require.NoError(t, err) + + err = eventMapping(reporter, info, input, true) + require.NoError(t, err) + require.Equal(t, 0, len(reporter.GetEvents())) + require.Equal(t, 0, len(reporter.GetErrors())) +} + +func TestUuidFromMonitoringConfig(t *testing.T) { + reporter := &mbtest.CapturingReporterV2{} + + info := beat.Info{ + UUID: "1234", + Beat: "testbeat", + } + + input, err := ioutil.ReadFile("./_meta/test/uuid_monitoring_config.json") + require.NoError(t, err) + + err = eventMapping(reporter, 
info, input, true) + require.NoError(t, err) + require.True(t, len(reporter.GetEvents()) >= 1) + require.Equal(t, 0, len(reporter.GetErrors())) + + event := reporter.GetEvents()[0] + + uuid, err := event.ModuleFields.GetValue("elasticsearch.cluster.id") + require.NoError(t, err) + + require.Equal(t, "uuid_from_monitoring_config", uuid) +} + +func TestNoUuidInMonitoringConfig(t *testing.T) { + reporter := &mbtest.CapturingReporterV2{} + + info := beat.Info{ + UUID: "1234", + Beat: "testbeat", + } + + input, err := ioutil.ReadFile("./_meta/test/uuid_no_monitoring_config.json") + require.NoError(t, err) + + err = eventMapping(reporter, info, input, true) + require.NoError(t, err) + require.True(t, len(reporter.GetEvents()) >= 1) + require.Equal(t, 0, len(reporter.GetErrors())) + + event := reporter.GetEvents()[0] + + uuid, err := event.ModuleFields.GetValue("elasticsearch.cluster.id") + require.NoError(t, err) + + require.Equal(t, "", uuid) +} From 6c5d221596c64c0cd25ecf02cf4387171947b977 Mon Sep 17 00:00:00 2001 From: Vignesh Shanmugam Date: Wed, 13 Dec 2023 07:40:38 -0800 Subject: [PATCH 002/129] fix: adjust formatting for heartbeat monitor and state loader (#37427) * fix: adjust formatting for heartbeat monitor and state loader * reset ES error --- CHANGELOG.next.asciidoc | 1 + heartbeat/monitors/active/icmp/stdloop.go | 2 +- heartbeat/monitors/monitor.go | 2 +- heartbeat/monitors/task.go | 6 +++--- heartbeat/monitors/wrappers/monitorstate/tracker.go | 2 +- x-pack/heartbeat/monitors/browser/sourcejob.go | 2 +- x-pack/heartbeat/monitors/browser/synthexec/synthexec.go | 2 +- x-pack/heartbeat/monitors/browser/synthexec/synthtypes.go | 2 +- 8 files changed, 10 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 2add786f8e1e..66a2ad9c40c3 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -86,6 +86,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Heartbeat* - Fix panics 
when parsing dereferencing invalid parsed url. {pull}34702[34702] +- Added fix for formatting the logs from stateloader properly. {pull}37369[37369] *Metricbeat* diff --git a/heartbeat/monitors/active/icmp/stdloop.go b/heartbeat/monitors/active/icmp/stdloop.go index b49d243ec4c6..f67ae402bc70 100644 --- a/heartbeat/monitors/active/icmp/stdloop.go +++ b/heartbeat/monitors/active/icmp/stdloop.go @@ -165,7 +165,7 @@ func (l *stdICMPLoop) runICMPRecv(conn *icmp.PacketConn, proto int) { bytes := make([]byte, 512) err := conn.SetReadDeadline(time.Now().Add(time.Second)) if err != nil { - logp.L().Error("could not set read deadline for ICMP: %w", err) + logp.L().Errorf("could not set read deadline for ICMP: %w", err) return } _, addr, err := conn.ReadFrom(bytes) diff --git a/heartbeat/monitors/monitor.go b/heartbeat/monitors/monitor.go index 6a16c7d3f303..29e7713145ca 100644 --- a/heartbeat/monitors/monitor.go +++ b/heartbeat/monitors/monitor.go @@ -256,7 +256,7 @@ func (m *Monitor) Stop() { if m.close != nil { err := m.close() if err != nil { - logp.L().Error("error closing monitor %s: %w", m.String(), err) + logp.L().Errorf("error closing monitor %s: %w", m.String(), err) } } diff --git a/heartbeat/monitors/task.go b/heartbeat/monitors/task.go index ee0839fe14e5..a655e1d15467 100644 --- a/heartbeat/monitors/task.go +++ b/heartbeat/monitors/task.go @@ -80,13 +80,13 @@ func (t *configuredJob) Start(pubClient beat.Client) { t.pubClient = pubClient if err != nil { - logp.L().Info("could not start monitor: %v", err) + logp.L().Infof("could not start monitor: %v", err) return } t.cancelFn, err = t.monitor.addTask(t.config.Schedule, t.monitor.stdFields.ID, t.makeSchedulerTaskFunc(), t.config.Type) if err != nil { - logp.L().Info("could not start monitor: %v", err) + logp.L().Infof("could not start monitor: %v", err) } } @@ -107,7 +107,7 @@ func runPublishJob(job jobs.Job, pubClient beat.Client) []scheduler.TaskFunc { conts, err := job(event) if err != nil { - 
logp.L().Info("Job failed with: %s", err) + logp.L().Infof("Job failed with: %s", err) } hasContinuations := len(conts) > 0 diff --git a/heartbeat/monitors/wrappers/monitorstate/tracker.go b/heartbeat/monitors/wrappers/monitorstate/tracker.go index 03909d55aa83..e350294e46e8 100644 --- a/heartbeat/monitors/wrappers/monitorstate/tracker.go +++ b/heartbeat/monitors/wrappers/monitorstate/tracker.go @@ -104,7 +104,7 @@ func (t *Tracker) GetCurrentState(sf stdfields.StdMonitorFields) (state *State) time.Sleep(sleepFor) } if err != nil { - logp.L().Warn("could not load prior state from elasticsearch after %d attempts, will create new state for monitor: %s", tries, sf.ID) + logp.L().Warnf("could not load prior state from elasticsearch after %d attempts, will create new state for monitor: %s", tries, sf.ID) } if loadedState != nil { diff --git a/x-pack/heartbeat/monitors/browser/sourcejob.go b/x-pack/heartbeat/monitors/browser/sourcejob.go index c62c50b3bb17..697e51abf51a 100644 --- a/x-pack/heartbeat/monitors/browser/sourcejob.go +++ b/x-pack/heartbeat/monitors/browser/sourcejob.go @@ -125,7 +125,7 @@ func (sj *SourceJob) extraArgs(uiOrigin bool) []string { s, err := json.Marshal(sj.browserCfg.PlaywrightOpts) if err != nil { // This should never happen, if it was parsed as a config it should be serializable - logp.L().Warn("could not serialize playwright options '%v': %w", sj.browserCfg.PlaywrightOpts, err) + logp.L().Warnf("could not serialize playwright options '%v': %w", sj.browserCfg.PlaywrightOpts, err) } else { extraArgs = append(extraArgs, "--playwright-options", string(s)) } diff --git a/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go b/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go index fbfb71526cc5..32f127de98c3 100644 --- a/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go +++ b/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go @@ -219,7 +219,7 @@ func runCmd( break } if err != nil { - logp.L().Warn("error decoding json for 
test json results: %w", err) + logp.L().Warnf("error decoding json for test json results: %w", err) } mpx.writeSynthEvent(&se) diff --git a/x-pack/heartbeat/monitors/browser/synthexec/synthtypes.go b/x-pack/heartbeat/monitors/browser/synthexec/synthtypes.go index 8555ae448a71..ddd928b216d6 100644 --- a/x-pack/heartbeat/monitors/browser/synthexec/synthtypes.go +++ b/x-pack/heartbeat/monitors/browser/synthexec/synthtypes.go @@ -96,7 +96,7 @@ func (se SynthEvent) ToMap() (m mapstr.M) { u, e := url.Parse(se.URL) if e != nil { _, _ = m.Put("url", mapstr.M{"full": se.URL}) - logp.L().Warn("Could not parse synthetics URL '%s': %s", se.URL, e.Error()) + logp.L().Warnf("Could not parse synthetics URL '%s': %s", se.URL, e.Error()) } else { _, _ = m.Put("url", wraputil.URLFields(u)) } From d11eac9452a28598f2afaed47202289a2463922c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emilio=20Alvarez=20Pi=C3=B1eiro?= <95703246+emilioalvap@users.noreply.github.com> Date: Thu, 14 Dec 2023 14:35:56 +0100 Subject: [PATCH 003/129] [Heartbeat] Remove duplicate seccomp syscalls (#37440) Remove duplicated pwrte64 syscall is preventing the filter from being installed. --- CHANGELOG.next.asciidoc | 1 + heartbeat/security/policy_linux_arm64.go | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 66a2ad9c40c3..ae767239de47 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -87,6 +87,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix panics when parsing dereferencing invalid parsed url. {pull}34702[34702] - Added fix for formatting the logs from stateloader properly. {pull}37369[37369] +- Remove duplicated syscall from arm seccomp profile. 
{pull}37440[37440] *Metricbeat* diff --git a/heartbeat/security/policy_linux_arm64.go b/heartbeat/security/policy_linux_arm64.go index 45c8192f5e9a..e198819dbb94 100644 --- a/heartbeat/security/policy_linux_arm64.go +++ b/heartbeat/security/policy_linux_arm64.go @@ -115,7 +115,6 @@ func init() { "prlimit64", "pselect6", "pwrite64", - "pwrite64", "read", "readlinkat", "recvfrom", From 6759826fc71a09c0c1eb65570f71dfd6be4421be Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 15 Dec 2023 14:37:16 -0500 Subject: [PATCH 004/129] [updatecli] update elastic stack version for testing 8.13.0-6av99u5d (#37312) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update snapshot.yml Made with ❤️️ by updatecli --------- Co-authored-by: apmmachine Co-authored-by: Pierre HILBERT Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 3ec6770467f0..4c2c820e6b07 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.12.0-33e8d7e1-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-6av99u5d-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ 
services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.12.0-33e8d7e1-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-6av99u5d-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.12.0-33e8d7e1-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-6av99u5d-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From a633696068edd5cf3a698eeaea0c9390909de2b2 Mon Sep 17 00:00:00 2001 From: Denis Date: Mon, 18 Dec 2023 14:18:41 +0000 Subject: [PATCH 005/129] Add filestream benchmarks for many files case, fix data race (#37345) Before it was only testing for a single file not the tests support many files. During test runs with the race detector a data race was found and fixed. --- filebeat/input/filestream/input.go | 27 +++-- filebeat/input/filestream/input_test.go | 146 ++++++++++++++++-------- 2 files changed, 111 insertions(+), 62 deletions(-) diff --git a/filebeat/input/filestream/input.go b/filebeat/input/filestream/input.go index b935161c1269..6cedd1bd9d16 100644 --- a/filebeat/input/filestream/input.go +++ b/filebeat/input/filestream/input.go @@ -59,7 +59,6 @@ type fileMeta struct { type filestream struct { readerConfig readerConfig encodingFactory encoding.EncodingFactory - encoding encoding.Encoding closerConfig closerConfig parsers parser.Config } @@ -175,7 +174,7 @@ func initState(log *logp.Logger, c loginp.Cursor, s fileSource) state { } func (inp *filestream) open(log *logp.Logger, canceler input.Canceler, fs fileSource, offset int64) (reader.Reader, error) { - f, err := inp.openFile(log, fs.newPath, offset) + f, encoding, err := inp.openFile(log, fs.newPath, offset) if err != nil { return nil, err } @@ -216,7 +215,7 @@ func (inp *filestream) open(log *logp.Logger, 
canceler input.Canceler, fs fileSo var r reader.Reader r, err = readfile.NewEncodeReader(dbgReader, readfile.Config{ - Codec: inp.encoding, + Codec: encoding, BufferSize: inp.readerConfig.BufferSize, Terminator: inp.readerConfig.LineTerminator, MaxBytes: encReaderMaxBytes, @@ -241,33 +240,33 @@ func (inp *filestream) open(log *logp.Logger, canceler input.Canceler, fs fileSo // or the file cannot be opened because for example of failing read permissions, an error // is returned and the harvester is closed. The file will be picked up again the next time // the file system is scanned -func (inp *filestream) openFile(log *logp.Logger, path string, offset int64) (*os.File, error) { +func (inp *filestream) openFile(log *logp.Logger, path string, offset int64) (*os.File, encoding.Encoding, error) { fi, err := os.Stat(path) if err != nil { - return nil, fmt.Errorf("failed to stat source file %s: %w", path, err) + return nil, nil, fmt.Errorf("failed to stat source file %s: %w", path, err) } // it must be checked if the file is not a named pipe before we try to open it // if it is a named pipe os.OpenFile fails, so there is no need to try opening it. 
if fi.Mode()&os.ModeNamedPipe != 0 { - return nil, fmt.Errorf("failed to open file %s, named pipes are not supported", fi.Name()) + return nil, nil, fmt.Errorf("failed to open file %s, named pipes are not supported", fi.Name()) } ok := false f, err := file.ReadOpen(path) if err != nil { - return nil, fmt.Errorf("failed opening %s: %w", path, err) + return nil, nil, fmt.Errorf("failed opening %s: %w", path, err) } defer cleanup.IfNot(&ok, cleanup.IgnoreError(f.Close)) fi, err = f.Stat() if err != nil { - return nil, fmt.Errorf("failed to stat source file %s: %w", path, err) + return nil, nil, fmt.Errorf("failed to stat source file %s: %w", path, err) } err = checkFileBeforeOpening(fi) if err != nil { - return nil, err + return nil, nil, err } if fi.Size() < offset { @@ -276,20 +275,20 @@ func (inp *filestream) openFile(log *logp.Logger, path string, offset int64) (*o } err = inp.initFileOffset(f, offset) if err != nil { - return nil, err + return nil, nil, err } - inp.encoding, err = inp.encodingFactory(f) + encoding, err := inp.encodingFactory(f) if err != nil { f.Close() if errors.Is(err, transform.ErrShortSrc) { - return nil, fmt.Errorf("initialising encoding for '%v' failed due to file being too short", f) + return nil, nil, fmt.Errorf("initialising encoding for '%v' failed due to file being too short", f) } - return nil, fmt.Errorf("initialising encoding for '%v' failed: %w", f, err) + return nil, nil, fmt.Errorf("initialising encoding for '%v' failed: %w", f, err) } ok = true - return f, nil + return f, encoding, nil } func checkFileBeforeOpening(fi os.FileInfo) error { diff --git a/filebeat/input/filestream/input_test.go b/filebeat/input/filestream/input_test.go index 55b8d2e7fc66..a1d9729c5aad 100644 --- a/filebeat/input/filestream/input_test.go +++ b/filebeat/input/filestream/input_test.go @@ -21,10 +21,11 @@ import ( "context" "fmt" "os" + "path/filepath" + "sync/atomic" "testing" "time" - "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" loginp "github.com/elastic/beats/v7/filebeat/input/filestream/internal/input-logfile" @@ -38,25 +39,26 @@ import ( func BenchmarkFilestream(b *testing.B) { logp.TestingSetup(logp.ToDiscardOutput()) - lineCount := 10000 - filename := generateFile(b, lineCount) - b.ResetTimer() + b.Run("single file", func(b *testing.B) { + lineCount := 10000 + filename := generateFile(b, b.TempDir(), lineCount) + b.ResetTimer() - b.Run("filestream default throughput", func(b *testing.B) { - cfg := ` + b.Run("inode throughput", func(b *testing.B) { + cfg := ` type: filestream prospector.scanner.check_interval: 1s paths: - ` + filename + ` ` - for i := 0; i < b.N; i++ { - runFilestreamBenchmark(b, fmt.Sprintf("default-benchmark-%d", i), cfg, lineCount) - } - }) + for i := 0; i < b.N; i++ { + runFilestreamBenchmark(b, fmt.Sprintf("one-file-inode-benchmark-%d", i), cfg, lineCount) + } + }) - b.Run("filestream fingerprint throughput", func(b *testing.B) { - cfg := ` + b.Run("fingerprint throughput", func(b *testing.B) { + cfg := ` type: filestream prospector.scanner: fingerprint.enabled: true @@ -65,9 +67,51 @@ file_identity.fingerprint: ~ paths: - ` + filename + ` ` - for i := 0; i < b.N; i++ { - runFilestreamBenchmark(b, fmt.Sprintf("fp-benchmark-%d", i), cfg, lineCount) + for i := 0; i < b.N; i++ { + runFilestreamBenchmark(b, fmt.Sprintf("one-file-fp-benchmark-%d", i), cfg, lineCount) + } + }) + }) + + b.Run("many files", func(b *testing.B) { + lineCount := 1000 + fileCount := 100 + dir := b.TempDir() + + for i := 0; i < fileCount; i++ { + _ = generateFile(b, dir, lineCount) } + + ingestPath := filepath.Join(dir, "*") + expEvents := lineCount * fileCount + b.ResetTimer() + + b.Run("inode throughput", func(b *testing.B) { + cfg := ` +type: filestream +prospector.scanner.check_interval: 1s +paths: + - ` + ingestPath + ` +` + for i := 0; i < b.N; i++ { + runFilestreamBenchmark(b, fmt.Sprintf("many-files-inode-benchmark-%d", i), cfg, expEvents) + } + 
}) + + b.Run("fingerprint throughput", func(b *testing.B) { + cfg := ` +type: filestream +prospector.scanner: + fingerprint.enabled: true + check_interval: 1s +file_identity.fingerprint: ~ +paths: + - ` + ingestPath + ` +` + for i := 0; i < b.N; i++ { + runFilestreamBenchmark(b, fmt.Sprintf("many-files-fp-benchmark-%d", i), cfg, expEvents) + } + }) }) } @@ -76,13 +120,13 @@ paths: // `cfg` must be a valid YAML string containing valid filestream configuration // `expEventCount` is an expected amount of produced events func runFilestreamBenchmark(b *testing.B, testID string, cfg string, expEventCount int) { + b.Helper() // we don't include initialization in the benchmark time b.StopTimer() - runner := createFilestreamTestRunner(b, testID, cfg, expEventCount) + runner := createFilestreamTestRunner(context.Background(), b, testID, cfg, int64(expEventCount), false) // this is where the benchmark actually starts b.StartTimer() - events := runner(b) - require.Len(b, events, expEventCount) + _ = runner(b) } // createFilestreamTestRunner can be used for both benchmarks and regular tests to run a filestream input @@ -90,9 +134,11 @@ func runFilestreamBenchmark(b *testing.B, testID string, cfg string, expEventCou // `testID` must be unique for each test run // `cfg` must be a valid YAML string containing valid filestream configuration // `eventLimit` is an amount of produced events after which the filestream will shutdown +// `collectEvents` if `true` the runner will return a list of all events produced by the filestream input. +// Events should not be collected in benchmarks due to high extra costs of using the channel. // // returns a runner function that returns produced events. 
-func createFilestreamTestRunner(b testing.TB, testID string, cfg string, eventLimit int) func(t testing.TB) []beat.Event { +func createFilestreamTestRunner(ctx context.Context, b testing.TB, testID string, cfg string, eventLimit int64, collectEvents bool) func(t testing.TB) []beat.Event { logger := logp.L() c, err := conf.NewConfigWithYAML([]byte(cfg), cfg) require.NoError(b, err) @@ -101,41 +147,43 @@ func createFilestreamTestRunner(b testing.TB, testID string, cfg string, eventLi input, err := p.Manager.Create(c) require.NoError(b, err) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) context := v2.Context{ Logger: logger, ID: testID, Cancelation: ctx, } - events := make([]beat.Event, 0, eventLimit) - connector, eventsDone := newTestPipeline(eventLimit, &events) - done := make(chan struct{}) + connector, events := newTestPipeline(eventLimit, collectEvents) + var out []beat.Event + if collectEvents { + out = make([]beat.Event, 0, eventLimit) + } + go func() { + // even if `collectEvents` is false we need to range the channel + // and wait until it's closed indicating that the input finished its job + for event := range events { + out = append(out, event) + } + cancel() + }() return func(t testing.TB) []beat.Event { - go func() { - err := input.Run(context, connector) - assert.NoError(b, err) - close(done) - }() + err := input.Run(context, connector) + require.NoError(b, err) - <-eventsDone - cancel() - <-done // for more stable results we should wait until the full shutdown - return events + return out } } -func generateFile(t testing.TB, lineCount int) string { +func generateFile(t testing.TB, dir string, lineCount int) string { t.Helper() - dir := t.TempDir() - file, err := os.CreateTemp(dir, "lines.log") + file, err := os.CreateTemp(dir, "*") require.NoError(t, err) - + filename := file.Name() for i := 0; i < lineCount; i++ { - fmt.Fprintf(file, "rather mediocre log line message - %d\n", i) + fmt.Fprintf(file, 
"rather mediocre log line message in %s - %d\n", filename, i) } - filename := file.Name() err = file.Close() require.NoError(t, err) return filename @@ -161,15 +209,15 @@ func (s *testStore) CleanupInterval() time.Duration { return time.Second } -func newTestPipeline(eventLimit int, out *[]beat.Event) (pc beat.PipelineConnector, done <-chan struct{}) { - ch := make(chan struct{}) - return &testPipeline{limit: eventLimit, done: ch, out: out}, ch +func newTestPipeline(eventLimit int64, collectEvents bool) (pc beat.PipelineConnector, out <-chan beat.Event) { + ch := make(chan beat.Event, eventLimit) + return &testPipeline{limit: eventLimit, out: ch, collect: collectEvents}, ch } type testPipeline struct { - done chan struct{} - limit int - out *[]beat.Event + limit int64 + out chan beat.Event + collect bool } func (p *testPipeline) ConnectWith(beat.ClientConfig) (beat.Client, error) { @@ -184,13 +232,15 @@ type testClient struct { } func (c *testClient) Publish(event beat.Event) { - c.testPipeline.limit-- - if c.testPipeline.limit < 0 { + newLimit := atomic.AddInt64(&c.testPipeline.limit, -1) + if newLimit < 0 { return } - *c.testPipeline.out = append(*c.testPipeline.out, event) - if c.testPipeline.limit == 0 { - close(c.testPipeline.done) + if c.testPipeline.collect { + c.testPipeline.out <- event + } + if newLimit == 0 { + close(c.testPipeline.out) } } From 46174c64e784554de04cb524f37b4bc6bb3f115a Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 18 Dec 2023 13:32:57 -0500 Subject: [PATCH 006/129] chore: Update snapshot.yml (#37454) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 4c2c820e6b07..03612a9033b3 100644 --- 
a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-6av99u5d-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-h30gube9-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-6av99u5d-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-h30gube9-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-6av99u5d-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-h30gube9-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 5fab3e9b1a93305cc84f73504d31df0c796d7047 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 18 Dec 2023 17:13:32 -0500 Subject: [PATCH 007/129] Update to elastic-agent-client with chunking support. (#37343) * Update to elastic-agent-client with chunking support. * Add changelog entry. * Fix changelog again. * Update to v7.6.0 elastic-agent-client. * Fix tests. * Fix another test. * Fix windows lint. 
--- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 77 ++++++++++++++----- go.mod | 22 +++--- go.sum | 42 +++++----- x-pack/libbeat/management/managerV2.go | 6 +- x-pack/libbeat/management/managerV2_test.go | 6 +- .../management/simple_input_config_test.go | 2 +- .../libbeat/management/tests/mock_server.go | 2 +- 8 files changed, 99 insertions(+), 59 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index ae767239de47..172fbf334d2a 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -53,6 +53,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Upgraded apache arrow library used in x-pack/libbeat/reader/parquet from v11 to v12.0.1 in order to fix cross-compilation issues {pull}35640[35640] - Fix panic when MaxRetryInterval is specified, but RetryInterval is not {pull}35820[35820] - Support build of projects outside of beats directory {pull}36126[36126] +- Support Elastic Agent control protocol chunking support {pull}37343[37343] *Auditbeat* diff --git a/NOTICE.txt b/NOTICE.txt index 0070ee8ff110..cfc6e641dd8a 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -12479,11 +12479,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-a -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-client/v7 -Version: v7.5.0 +Version: v7.6.0 Licence type (autodetected): Elastic -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.5.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.6.0/LICENSE.txt: ELASTIC LICENSE AGREEMENT @@ -24701,11 +24701,11 @@ THE SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : golang.org/x/crypto -Version: v0.14.0 +Version: v0.16.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.14.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.16.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -24775,11 +24775,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/mod -Version: v0.10.0 +Version: v0.14.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/mod@v0.10.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/mod@v0.14.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -24812,11 +24812,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/net -Version: v0.17.0 +Version: v0.19.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.17.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.19.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -24886,11 +24886,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : golang.org/x/sync -Version: v0.3.0 +Version: v0.5.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/sync@v0.3.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/sync@v0.5.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -24923,11 +24923,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/sys -Version: v0.13.0 +Version: v0.15.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.13.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.15.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -24960,11 +24960,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/text -Version: v0.13.0 +Version: v0.14.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.13.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.14.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -25034,11 +25034,48 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : golang.org/x/tools -Version: v0.9.1 +Version: v0.16.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/tools@v0.16.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/tools/go/vcs +Version: v0.1.0-deprecated Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/tools@v0.9.1/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/tools/go/vcs@v0.1.0-deprecated/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -51250,11 +51287,11 @@ THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : golang.org/x/exp -Version: v0.0.0-20220921023135-46d9e7742f1e +Version: v0.0.0-20231127185646-65229373498e Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/exp@v0.0.0-20220921023135-46d9e7742f1e/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/exp@v0.0.0-20231127185646-65229373498e/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -51287,11 +51324,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/term -Version: v0.13.0 +Version: v0.15.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.13.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.15.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. 
diff --git a/go.mod b/go.mod index 77aa9c7861ce..bb0ec7a35489 100644 --- a/go.mod +++ b/go.mod @@ -69,7 +69,7 @@ require ( github.com/dustin/go-humanize v1.0.1 github.com/eapache/go-resiliency v1.2.0 github.com/eclipse/paho.mqtt.golang v1.3.5 - github.com/elastic/elastic-agent-client/v7 v7.5.0 + github.com/elastic/elastic-agent-client/v7 v7.6.0 github.com/elastic/go-concert v0.2.0 github.com/elastic/go-libaudit/v2 v2.4.0 github.com/elastic/go-licenser v0.4.1 @@ -152,16 +152,16 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.14.0 + golang.org/x/crypto v0.16.0 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 - golang.org/x/mod v0.10.0 - golang.org/x/net v0.17.0 + golang.org/x/mod v0.14.0 + golang.org/x/net v0.19.0 golang.org/x/oauth2 v0.10.0 - golang.org/x/sync v0.3.0 - golang.org/x/sys v0.13.0 - golang.org/x/text v0.13.0 + golang.org/x/sync v0.5.0 + golang.org/x/sys v0.15.0 + golang.org/x/text v0.14.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.9.1 + golang.org/x/tools v0.16.0 google.golang.org/api v0.126.0 google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/grpc v1.58.3 @@ -224,6 +224,7 @@ require ( go.elastic.co/apm/module/apmhttp/v2 v2.4.7 go.elastic.co/apm/v2 v2.4.7 go.mongodb.org/mongo-driver v1.5.1 + golang.org/x/tools/go/vcs v0.1.0-deprecated google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -366,8 +367,8 @@ require ( go.opentelemetry.io/otel v1.19.0 // indirect go.opentelemetry.io/otel/metric v1.19.0 // indirect go.opentelemetry.io/otel/trace v1.19.0 // indirect - golang.org/x/exp v0.0.0-20220921023135-46d9e7742f1e // indirect - golang.org/x/term v0.13.0 // indirect + golang.org/x/exp v0.0.0-20231127185646-65229373498e // indirect + golang.org/x/term v0.15.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 
// indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect @@ -404,6 +405,7 @@ replace ( github.com/docker/go-plugins-helpers => github.com/elastic/go-plugins-helpers v0.0.0-20200207104224-bdf17607b79f github.com/dop251/goja => github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 github.com/dop251/goja_nodejs => github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6 + github.com/fsnotify/fsevents => github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 github.com/fsnotify/fsnotify => github.com/adriansr/fsnotify v1.4.8-0.20211018144411-a81f2b630e7c github.com/godror/godror => github.com/godror/godror v0.33.2 // updating to v0.24.2 caused a breaking change diff --git a/go.sum b/go.sum index e836a3d92670..5b4d63c39652 100644 --- a/go.sum +++ b/go.sum @@ -654,8 +654,8 @@ github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3 h1:lnDkqiRFKm0rxdljqr github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3/go.mod h1:aPqzac6AYkipvp4hufTyMj5PDIphF3+At8zr7r51xjY= github.com/elastic/elastic-agent-autodiscover v0.6.5 h1:5DeMpuNc8c/tN6HN0A4A2uOFTNFHSg7xrKApzfhvF1U= github.com/elastic/elastic-agent-autodiscover v0.6.5/go.mod h1:chulyCAyZb/njMHgzkhC/yWnt8v/Y6eCRUhmFVnsA5o= -github.com/elastic/elastic-agent-client/v7 v7.5.0 h1:niI3WQ+01Lnp2r5LxK8SyNhrPJe13vBiOkqrDRK2oTA= -github.com/elastic/elastic-agent-client/v7 v7.5.0/go.mod h1:DYoX95xjC4BW/p2avyu724Qr2+hoUIz9eCU9CVS1d+0= +github.com/elastic/elastic-agent-client/v7 v7.6.0 h1:FEn6FjzynW4TIQo5G096Tr7xYK/P5LY9cSS6wRbXZTc= +github.com/elastic/elastic-agent-client/v7 v7.6.0/go.mod h1:GlUKrbVd/O1CRAZonpBeN3J0RlVqP6VGcrBjFWca+aM= github.com/elastic/elastic-agent-libs v0.7.2 h1:yT0hF0UAxJCdQqhHh6SFpgYrcpB10oFzPj8IaytPS2o= github.com/elastic/elastic-agent-libs v0.7.2/go.mod h1:pVBEElQJUO9mr4WStWNXuQGsJn54lcjAoYAHmsvBLBc= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 h1:sb+25XJn/JcC9/VL8HX4r4QXSUq4uTNzGS2kxOE7u1U= @@ -2030,8 
+2030,8 @@ golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2047,8 +2047,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp v0.0.0-20220921023135-46d9e7742f1e h1:Ctm9yurWsg7aWwIpH9Bnap/IdSVxixymIb3MhiMEQQA= -golang.org/x/exp v0.0.0-20220921023135-46d9e7742f1e/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20231127185646-65229373498e h1:Gvh4YaCaXNs6dKTlfgismwWZKyjVZXwOPfIyUaqU3No= +golang.org/x/exp v0.0.0-20231127185646-65229373498e/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image 
v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2089,8 +2089,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2173,8 +2173,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2207,8 +2207,8 @@ golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2344,8 +2344,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= 
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2354,8 +2354,8 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2370,8 +2370,8 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2475,8 +2475,10 @@ golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= -golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= +golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools/go/vcs v0.1.0-deprecated h1:cOIJqWBl99H1dH5LWizPa+0ImeeJq3t3cJjaeOWUAL4= +golang.org/x/tools/go/vcs v0.1.0-deprecated/go.mod h1:zUrvATBAvEI9535oC0yWYsLsHIV4Z7g63sNPVMtuBy8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/x-pack/libbeat/management/managerV2.go b/x-pack/libbeat/management/managerV2.go index fd38051efb3a..235325c0cbfc 100644 --- a/x-pack/libbeat/management/managerV2.go +++ b/x-pack/libbeat/management/managerV2.go @@ -35,8 +35,6 @@ import ( "github.com/elastic/beats/v7/libbeat/version" ) -var errStoppingOnOutputChange = errors.New("stopping Beat on output change") - // diagnosticHandler is a wrapper type that's a bit of a hack, the compiler won't let us send the raw unit struct, // since there's a type disagreement with the `client.DiagnosticHook` argument, and due to licensing issues we can't import the agent client types into the 
reloader type diagnosticHandler struct { @@ -173,7 +171,7 @@ func NewV2AgentManager(config *conf.C, registry *reload.Registry) (lbmanagement. client.VersionInfo{ Name: "beat-v2-client-for-testing", Version: version.GetDefaultVersion(), - }, grpc.WithTransportCredentials(insecure.NewCredentials())) + }, client.WithGRPCDialOptions(grpc.WithTransportCredentials(insecure.NewCredentials()))) } else { // Normal Elastic-Agent-Client initialisation agentClient, _, err = client.NewV2FromReader(os.Stdin, client.VersionInfo{ @@ -557,7 +555,7 @@ func (cm *BeatV2Manager) reload(units map[unitKey]*client.Unit) { for _, unit := range units { errs := unitErrors[unit.ID()] if len(errs) != 0 { - unit.UpdateState(client.UnitStateFailed, errors.Join(errs...).Error(), nil) + _ = unit.UpdateState(client.UnitStateFailed, errors.Join(errs...).Error(), nil) } } }() diff --git a/x-pack/libbeat/management/managerV2_test.go b/x-pack/libbeat/management/managerV2_test.go index 9fe238605b49..ea67fdd89f40 100644 --- a/x-pack/libbeat/management/managerV2_test.go +++ b/x-pack/libbeat/management/managerV2_test.go @@ -209,7 +209,7 @@ func TestManagerV2(t *testing.T) { Meta: map[string]string{ "key": "value", }, - }, grpc.WithTransportCredentials(insecure.NewCredentials())) + }, client.WithGRPCDialOptions(grpc.WithTransportCredentials(insecure.NewCredentials()))) m, err := NewV2AgentManagerWithClient(&Config{ Enabled: true, @@ -321,7 +321,7 @@ func TestOutputError(t *testing.T) { fmt.Sprintf(":%d", server.Port), "", client.VersionInfo{}, - grpc.WithTransportCredentials(insecure.NewCredentials())) + client.WithGRPCDialOptions(grpc.WithTransportCredentials(insecure.NewCredentials()))) m, err := NewV2AgentManagerWithClient( &Config{ @@ -488,7 +488,7 @@ func TestErrorPerUnit(t *testing.T) { fmt.Sprintf(":%d", server.Port), "", client.VersionInfo{}, - grpc.WithTransportCredentials(insecure.NewCredentials())) + client.WithGRPCDialOptions(grpc.WithTransportCredentials(insecure.NewCredentials()))) m, err := 
NewV2AgentManagerWithClient( &Config{ diff --git a/x-pack/libbeat/management/simple_input_config_test.go b/x-pack/libbeat/management/simple_input_config_test.go index ef88781a8622..7822d347112b 100644 --- a/x-pack/libbeat/management/simple_input_config_test.go +++ b/x-pack/libbeat/management/simple_input_config_test.go @@ -133,7 +133,7 @@ func TestSimpleInputConfig(t *testing.T) { fmt.Sprintf(":%d", server.Port), "", client.VersionInfo{}, - grpc.WithTransportCredentials(insecure.NewCredentials())) + client.WithGRPCDialOptions(grpc.WithTransportCredentials(insecure.NewCredentials()))) m, err := NewV2AgentManagerWithClient( &Config{ diff --git a/x-pack/libbeat/management/tests/mock_server.go b/x-pack/libbeat/management/tests/mock_server.go index 84805c1f5cc8..8671b1242339 100644 --- a/x-pack/libbeat/management/tests/mock_server.go +++ b/x-pack/libbeat/management/tests/mock_server.go @@ -103,7 +103,7 @@ func NewMockServer(t *testing.T, canStop func(string) bool, inputConfig *proto.U Meta: map[string]string{ "key": "value", }, - }, grpc.WithTransportCredentials(insecure.NewCredentials())) + }, client.WithGRPCDialOptions(grpc.WithTransportCredentials(insecure.NewCredentials()))) return MockV2Handler{Srv: srv, Client: client} } From 8fe2f53bf1613bf6c0566a7e8e7afda785c51770 Mon Sep 17 00:00:00 2001 From: Anderson Queiroz Date: Tue, 19 Dec 2023 11:12:37 +0100 Subject: [PATCH 008/129] update codeowners (#37336) --- .github/CODEOWNERS | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index ee4de95884df..d8f36cdde15c 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -29,19 +29,18 @@ CHANGELOG* /filebeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module. 
/filebeat/input/syslog/ @elastic/security-external-integrations /filebeat/input/winlog/ @elastic/security-external-integrations -/filebeat/module/ @elastic/integrations /filebeat/module/apache @elastic/obs-infraobs-integrations /filebeat/module/auditd @elastic/security-external-integrations /filebeat/module/elasticsearch/ @elastic/infra-monitoring-ui /filebeat/module/haproxy @elastic/obs-infraobs-integrations -/filebeat/module/icinga @elastic/integrations +/filebeat/module/icinga # TODO: find right team /filebeat/module/iis @elastic/obs-infraobs-integrations /filebeat/module/kafka @elastic/obs-infraobs-integrations /filebeat/module/kibana/ @elastic/infra-monitoring-ui /filebeat/module/logstash/ @elastic/infra-monitoring-ui /filebeat/module/mongodb @elastic/obs-infraobs-integrations /filebeat/module/mysql @elastic/security-external-integrations -/filebeat/module/nats @elastic/integrations +/filebeat/module/nats @elastic/obs-infraobs-integrations /filebeat/module/nginx @elastic/obs-infraobs-integrations /filebeat/module/osquery @elastic/security-external-integrations /filebeat/module/pensando @elastic/security-external-integrations @@ -49,8 +48,8 @@ CHANGELOG* /filebeat/module/redis @elastic/obs-infraobs-integrations /filebeat/module/santa @elastic/security-external-integrations /filebeat/module/system @elastic/elastic-agent-data-plane -/filebeat/module/traefik @elastic/integrations -/heartbeat/ @elastic/hosted-services +/filebeat/module/traefik # TODO: find right team +/heartbeat/ @elastic/obs-ds-hosted-services /journalbeat @elastic/elastic-agent-data-plane /libbeat/ @elastic/elastic-agent-data-plane /libbeat/docs/processors-list.asciidoc @elastic/ingest-docs @@ -68,7 +67,6 @@ CHANGELOG* /metricbeat/ @elastic/elastic-agent-data-plane /metricbeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module. 
/metricbeat/helper/kubernetes @elastic/obs-cloudnative-monitoring -/metricbeat/module/ @elastic/integrations /metricbeat/module/apache @elastic/obs-infraobs-integrations /metricbeat/module/beat/ @elastic/infra-monitoring-ui /metricbeat/module/ceph @elastic/obs-infraobs-integrations @@ -122,7 +120,6 @@ CHANGELOG* /x-pack/filebeat/input/lumberjack/ @elastic/security-external-integrations /x-pack/filebeat/input/netflow/ @elastic/security-external-integrations /x-pack/filebeat/input/o365audit/ @elastic/security-external-integrations -/x-pack/filebeat/module/ @elastic/integrations /x-pack/filebeat/module/activemq @elastic/obs-infraobs-integrations /x-pack/filebeat/module/aws @elastic/obs-cloud-monitoring /x-pack/filebeat/module/awsfargate @elastic/obs-cloud-monitoring @@ -174,10 +171,9 @@ CHANGELOG* /x-pack/filebeat/module/zscaler @elastic/security-external-integrations /x-pack/filebeat/modules.d/zoom.yml.disabled @elastic/security-external-integrations /x-pack/filebeat/processors/decode_cef/ @elastic/security-external-integrations -/x-pack/heartbeat/ @elastic/hosted-services +/x-pack/heartbeat/ @elastic/obs-ds-hosted-services /x-pack/metricbeat/ @elastic/elastic-agent-data-plane /x-pack/metricbeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module. 
-/x-pack/metricbeat/module/ @elastic/integrations /x-pack/metricbeat/module/activemq @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/airflow @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/cloudfoundry @elastic/obs-infraobs-integrations From 50aac8069c525c59781f347f66a25ee7534ade34 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 19 Dec 2023 12:35:07 -0500 Subject: [PATCH 009/129] chore: Update snapshot.yml (#37463) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 03612a9033b3..0e66db12a2a5 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-h30gube9-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-ubdkmnyz-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-h30gube9-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-ubdkmnyz-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-h30gube9-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-ubdkmnyz-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From b59a8f4769c20c1d076bf7ff8c9b9b175f0f5969 Mon Sep 17 00:00:00 2001 From: Anderson Queiroz 
Date: Wed, 20 Dec 2023 11:36:50 +0100 Subject: [PATCH 010/129] Replace EOL modules: github.com/golang/protobuf by google.golang.org/protobuf (#37212) --- NOTICE.txt | 76 +++++++++---------- go.mod | 2 +- .../provider/aws/aws/kinesis_test.go | 7 +- .../aws/aws/transformer/transformer_test.go | 7 +- .../gcp/metrics/cloudsql/metadata_test.go | 10 +-- .../gcp/metrics/compute/metadata_test.go | 10 +-- .../module/gcp/metrics/metrics_requester.go | 11 ++- .../gcp/metrics/metrics_requester_test.go | 12 +-- .../module/gcp/metrics/metricset.go | 6 +- .../module/gcp/metrics/redis/metadata_test.go | 10 +-- 10 files changed, 76 insertions(+), 75 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index cfc6e641dd8a..cca066349fa3 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -17323,44 +17323,6 @@ Contents of probable licence file $GOMODCACHE/github.com/golang/mock@v1.6.0/LICE limitations under the License. --------------------------------------------------------------------------------- -Dependency : github.com/golang/protobuf -Version: v1.5.3 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/golang/protobuf@v1.5.3/LICENSE: - -Copyright 2010 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - -------------------------------------------------------------------------------- Dependency : github.com/golang/snappy Version: v0.0.4 @@ -37758,6 +37720,44 @@ third-party archives. limitations under the License. +-------------------------------------------------------------------------------- +Dependency : github.com/golang/protobuf +Version: v1.5.3 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/golang/protobuf@v1.5.3/LICENSE: + +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + -------------------------------------------------------------------------------- Dependency : github.com/google/gofuzz Version: v1.2.0 diff --git a/go.mod b/go.mod index bb0ec7a35489..6732fbc60061 100644 --- a/go.mod +++ b/go.mod @@ -95,7 +95,6 @@ require ( github.com/gofrs/uuid v4.4.0+incompatible github.com/gogo/protobuf v1.3.2 github.com/golang/mock v1.6.0 - github.com/golang/protobuf v1.5.3 github.com/golang/snappy v0.0.4 github.com/gomodule/redigo v1.8.3 github.com/google/flatbuffers v23.3.3+incompatible @@ -291,6 +290,7 @@ require ( github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/licenseclassifier v0.0.0-20221004142553-c1ed8fcf4bab // indirect github.com/google/s2a-go v0.1.4 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect diff --git a/x-pack/functionbeat/provider/aws/aws/kinesis_test.go 
b/x-pack/functionbeat/provider/aws/aws/kinesis_test.go index 242ed63d4a86..20d92ce36060 100644 --- a/x-pack/functionbeat/provider/aws/aws/kinesis_test.go +++ b/x-pack/functionbeat/provider/aws/aws/kinesis_test.go @@ -14,8 +14,9 @@ import ( "github.com/aws/aws-lambda-go/events" "github.com/awslabs/kinesis-aggregation/go/v2/deaggregator" aggRecProto "github.com/awslabs/kinesis-aggregation/go/v2/records" - "github.com/golang/protobuf/proto" //nolint:staticcheck // SA1019 dependency uses deprecated package "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoimpl" "github.com/elastic/beats/v7/x-pack/functionbeat/function/provider" conf "github.com/elastic/elastic-agent-libs/config" @@ -133,14 +134,14 @@ func generateAggregatedKinesisEvent(validRec bool) events.KinesisEvent { partKeyTable = append(partKeyTable, "0") aggRec.PartitionKeyTable = partKeyTable - data, _ := proto.Marshal(aggRec) + data, _ := proto.Marshal(protoimpl.X.ProtoMessageV2Of(aggRec)) md5Hash := md5.Sum(data) aggRecBytes = append(aggRecBytes, data...) aggRecBytes = append(aggRecBytes, md5Hash[:]...) 
return events.KinesisEvent{ Records: []events.KinesisEventRecord{ - events.KinesisEventRecord{ + { AwsRegion: "east-1", EventID: "1234", EventName: "connect", diff --git a/x-pack/functionbeat/provider/aws/aws/transformer/transformer_test.go b/x-pack/functionbeat/provider/aws/aws/transformer/transformer_test.go index e29bba6cde32..ae0dcb12eee4 100644 --- a/x-pack/functionbeat/provider/aws/aws/transformer/transformer_test.go +++ b/x-pack/functionbeat/provider/aws/aws/transformer/transformer_test.go @@ -14,12 +14,13 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/service/kinesis/types" + "google.golang.org/protobuf/runtime/protoimpl" "github.com/aws/aws-lambda-go/events" "github.com/awslabs/kinesis-aggregation/go/v2/deaggregator" aggRecProto "github.com/awslabs/kinesis-aggregation/go/v2/records" - "github.com/golang/protobuf/proto" //nolint:staticcheck // SA1019 dependency uses deprecated package "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/proto" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/elastic-agent-libs/mapstr" @@ -33,7 +34,7 @@ func TestCloudwatch(t *testing.T) { SubscriptionFilters: []string{"MyFilter"}, MessageType: "DATA_MESSAGE", LogEvents: []events.CloudwatchLogsLogEvent{ - events.CloudwatchLogsLogEvent{ + { ID: "1234567890123456789", Timestamp: 1566908691193, Message: "my interesting message", @@ -393,7 +394,7 @@ func generateKinesisAggregateRecord(numRecords int, valid bool) []byte { } aggRec.PartitionKeyTable = partKeyTable - data, _ := proto.Marshal(aggRec) + data, _ := proto.Marshal(protoimpl.X.ProtoMessageV2Of(aggRec)) md5Hash := md5.Sum(data) aggRecBytes = append(aggRecBytes, data...) aggRecBytes = append(aggRecBytes, md5Hash[:]...) 
diff --git a/x-pack/metricbeat/module/gcp/metrics/cloudsql/metadata_test.go b/x-pack/metricbeat/module/gcp/metrics/cloudsql/metadata_test.go index cb678607c242..b6cd6e228f85 100644 --- a/x-pack/metricbeat/module/gcp/metrics/cloudsql/metadata_test.go +++ b/x-pack/metricbeat/module/gcp/metrics/cloudsql/metadata_test.go @@ -8,9 +8,9 @@ import ( "testing" monitoring "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" - "github.com/golang/protobuf/ptypes/timestamp" "google.golang.org/genproto/googleapis/api/metric" "google.golang.org/genproto/googleapis/api/monitoredres" + "google.golang.org/protobuf/types/known/timestamppb" "gotest.tools/assert" "github.com/elastic/elastic-agent-libs/mapstr" @@ -43,10 +43,10 @@ var fake = &monitoring.TimeSeries{ Value: &monitoring.TypedValue_DoubleValue{DoubleValue: 0.0041224284852319215}, }, Interval: &monitoring.TimeInterval{ - StartTime: ×tamp.Timestamp{ + StartTime: ×tamppb.Timestamp{ Seconds: 1569932700, }, - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 1569932700, }, }, @@ -55,10 +55,10 @@ var fake = &monitoring.TimeSeries{ Value: &monitoring.TypedValue_DoubleValue{DoubleValue: 0.004205757571772513}, }, Interval: &monitoring.TimeInterval{ - StartTime: ×tamp.Timestamp{ + StartTime: ×tamppb.Timestamp{ Seconds: 1569932640, }, - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 1569932640, }, }, diff --git a/x-pack/metricbeat/module/gcp/metrics/compute/metadata_test.go b/x-pack/metricbeat/module/gcp/metrics/compute/metadata_test.go index 8cfae6675928..17213b8e1b89 100644 --- a/x-pack/metricbeat/module/gcp/metrics/compute/metadata_test.go +++ b/x-pack/metricbeat/module/gcp/metrics/compute/metadata_test.go @@ -8,10 +8,10 @@ import ( "testing" monitoring "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" - "github.com/golang/protobuf/ptypes/timestamp" "github.com/stretchr/testify/assert" "google.golang.org/genproto/googleapis/api/metric" "google.golang.org/genproto/googleapis/api/monitoredres" + 
"google.golang.org/protobuf/types/known/timestamppb" ) var fake = &monitoring.TimeSeries{ @@ -41,10 +41,10 @@ var fake = &monitoring.TimeSeries{ Value: &monitoring.TypedValue_DoubleValue{DoubleValue: 0.0041224284852319215}, }, Interval: &monitoring.TimeInterval{ - StartTime: ×tamp.Timestamp{ + StartTime: ×tamppb.Timestamp{ Seconds: 1569932700, }, - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 1569932700, }, }, @@ -53,10 +53,10 @@ var fake = &monitoring.TimeSeries{ Value: &monitoring.TypedValue_DoubleValue{DoubleValue: 0.004205757571772513}, }, Interval: &monitoring.TimeInterval{ - StartTime: ×tamp.Timestamp{ + StartTime: ×tamppb.Timestamp{ Seconds: 1569932640, }, - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 1569932640, }, }, diff --git a/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go b/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go index a39f320ef447..e7e7f081e526 100644 --- a/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go +++ b/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go @@ -12,12 +12,11 @@ import ( "sync" "time" - "github.com/golang/protobuf/ptypes/duration" - monitoring "cloud.google.com/go/monitoring/apiv3/v2" "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" - "github.com/golang/protobuf/ptypes/timestamp" "google.golang.org/api/iterator" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" "github.com/elastic/beats/v7/x-pack/metricbeat/module/gcp" "github.com/elastic/elastic-agent-libs/logp" @@ -256,7 +255,7 @@ func (r *metricsRequester) getFilterForMetric(serviceName, m string) string { } // Returns a GCP TimeInterval based on the ingestDelay and samplePeriod from ListMetricDescriptor -func getTimeIntervalAligner(ingestDelay time.Duration, samplePeriod time.Duration, collectionPeriod *duration.Duration, inputAligner string) (*monitoringpb.TimeInterval, string) { +func getTimeIntervalAligner(ingestDelay 
time.Duration, samplePeriod time.Duration, collectionPeriod *durationpb.Duration, inputAligner string) (*monitoringpb.TimeInterval, string) { var startTime, endTime, currentTime time.Time var needsAggregation bool currentTime = time.Now().UTC() @@ -280,10 +279,10 @@ func getTimeIntervalAligner(ingestDelay time.Duration, samplePeriod time.Duratio } interval := &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ + StartTime: ×tamppb.Timestamp{ Seconds: startTime.Unix(), }, - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: endTime.Unix(), }, } diff --git a/x-pack/metricbeat/module/gcp/metrics/metrics_requester_test.go b/x-pack/metricbeat/module/gcp/metrics/metrics_requester_test.go index 2b058de2beed..658568b66ca5 100644 --- a/x-pack/metricbeat/module/gcp/metrics/metrics_requester_test.go +++ b/x-pack/metricbeat/module/gcp/metrics/metrics_requester_test.go @@ -8,9 +8,9 @@ import ( "testing" "time" - "github.com/golang/protobuf/ptypes/duration" "github.com/stretchr/testify/assert" "go.uber.org/zap/zapcore" + "google.golang.org/protobuf/types/known/durationpb" "github.com/elastic/beats/v7/x-pack/metricbeat/module/gcp" "github.com/elastic/elastic-agent-libs/logp" @@ -21,7 +21,7 @@ func TestGetTimeIntervalAligner(t *testing.T) { title string ingestDelay time.Duration samplePeriod time.Duration - collectionPeriod *duration.Duration + collectionPeriod *durationpb.Duration inputAligner string expectedAligner string }{ @@ -29,7 +29,7 @@ func TestGetTimeIntervalAligner(t *testing.T) { "test collectionPeriod equals to samplePeriod", time.Duration(240) * time.Second, time.Duration(60) * time.Second, - &duration.Duration{ + &durationpb.Duration{ Seconds: int64(60), }, "", @@ -39,7 +39,7 @@ func TestGetTimeIntervalAligner(t *testing.T) { "test collectionPeriod larger than samplePeriod", time.Duration(240) * time.Second, time.Duration(60) * time.Second, - &duration.Duration{ + &durationpb.Duration{ Seconds: int64(300), }, "ALIGN_MEAN", @@ -49,7 +49,7 @@ func 
TestGetTimeIntervalAligner(t *testing.T) { "test collectionPeriod smaller than samplePeriod", time.Duration(240) * time.Second, time.Duration(60) * time.Second, - &duration.Duration{ + &durationpb.Duration{ Seconds: int64(30), }, "ALIGN_MAX", @@ -59,7 +59,7 @@ func TestGetTimeIntervalAligner(t *testing.T) { "test collectionPeriod equals to samplePeriod with given aligner", time.Duration(240) * time.Second, time.Duration(60) * time.Second, - &duration.Duration{ + &durationpb.Duration{ Seconds: int64(60), }, "ALIGN_MEAN", diff --git a/x-pack/metricbeat/module/gcp/metrics/metricset.go b/x-pack/metricbeat/module/gcp/metrics/metricset.go index ffe5cd788a1f..49cdce5d73a8 100644 --- a/x-pack/metricbeat/module/gcp/metrics/metricset.go +++ b/x-pack/metricbeat/module/gcp/metrics/metricset.go @@ -13,10 +13,10 @@ import ( monitoring "cloud.google.com/go/monitoring/apiv3/v2" "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" - "github.com/golang/protobuf/ptypes/duration" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/genproto/googleapis/api/metric" + "google.golang.org/protobuf/types/known/durationpb" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/x-pack/metricbeat/module/gcp" @@ -107,7 +107,7 @@ type config struct { CredentialsJSON string `config:"credentials_json"` opt []option.ClientOption - period *duration.Duration + period *durationpb.Duration } // New creates a new instance of the MetricSet. 
New is responsible for unpacking @@ -139,7 +139,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return m, fmt.Errorf("no credentials_file_path or credentials_json specified") } - m.config.period = &duration.Duration{ + m.config.period = &durationpb.Duration{ Seconds: int64(m.Module().Config().Period.Seconds()), } diff --git a/x-pack/metricbeat/module/gcp/metrics/redis/metadata_test.go b/x-pack/metricbeat/module/gcp/metrics/redis/metadata_test.go index 9fc790bf9e51..48ac45661ed9 100644 --- a/x-pack/metricbeat/module/gcp/metrics/redis/metadata_test.go +++ b/x-pack/metricbeat/module/gcp/metrics/redis/metadata_test.go @@ -8,10 +8,10 @@ import ( "testing" monitoring "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" - "github.com/golang/protobuf/ptypes/timestamp" "github.com/stretchr/testify/assert" "google.golang.org/genproto/googleapis/api/metric" "google.golang.org/genproto/googleapis/api/monitoredres" + "google.golang.org/protobuf/types/known/timestamppb" ) var fake = &monitoring.TimeSeries{ @@ -41,10 +41,10 @@ var fake = &monitoring.TimeSeries{ Value: &monitoring.TypedValue_DoubleValue{DoubleValue: 0.0041224284852319215}, }, Interval: &monitoring.TimeInterval{ - StartTime: ×tamp.Timestamp{ + StartTime: ×tamppb.Timestamp{ Seconds: 1569932700, }, - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 1569932700, }, }, @@ -53,10 +53,10 @@ var fake = &monitoring.TimeSeries{ Value: &monitoring.TypedValue_DoubleValue{DoubleValue: 0.004205757571772513}, }, Interval: &monitoring.TimeInterval{ - StartTime: ×tamp.Timestamp{ + StartTime: ×tamppb.Timestamp{ Seconds: 1569932640, }, - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 1569932640, }, }, From e92dae6d18a17923c721a87a04891e3966877eb8 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 20 Dec 2023 12:37:07 -0500 Subject: [PATCH 011/129] chore: Update snapshot.yml (#37471) MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 0e66db12a2a5..7b2595d18d14 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-ubdkmnyz-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-9amqxdis-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-ubdkmnyz-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-9amqxdis-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-ubdkmnyz-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-9amqxdis-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 686c14fba8215f8523f275197aeac5f23acfbbfa Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 21 Dec 2023 12:34:26 -0500 Subject: [PATCH 012/129] chore: Update snapshot.yml (#37482) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 7b2595d18d14..09e1294b17b5 100644 --- a/testing/environments/snapshot.yml 
+++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-9amqxdis-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-yufkxnwm-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-9amqxdis-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-yufkxnwm-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-9amqxdis-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-yufkxnwm-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From e44fc1446599e128535ee3cf2e99c94ea93bc7b3 Mon Sep 17 00:00:00 2001 From: Giuseppe Santoro Date: Fri, 22 Dec 2023 11:43:03 +0000 Subject: [PATCH 013/129] add dev-tools/kubernetes entry in codeowners (#37444) --- .github/CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index d8f36cdde15c..61fa4b04bcdc 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -24,6 +24,7 @@ CHANGELOG* /deploy/ @elastic/elastic-agent-data-plane /deploy/kubernetes @elastic/elastic-agent-data-plane @elastic/obs-cloudnative-monitoring /dev-tools/ @elastic/elastic-agent-data-plane +/dev-tools/kubernetes @elastic/obs-ds-hosted-services /docs/ @elastic/elastic-agent-data-plane /filebeat @elastic/elastic-agent-data-plane /filebeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module. 
From 2f7ff01798d23bc5790c1a98e372bca087d502c6 Mon Sep 17 00:00:00 2001 From: Giuseppe Santoro Date: Fri, 22 Dec 2023 11:48:14 +0000 Subject: [PATCH 014/129] Update k8s manifests for filebeat and metricbeat in dev-tools (#37401) * updated manifests from deploy * adding filestream id to all filebeat manifests --- deploy/kubernetes/filebeat-kubernetes.yaml | 1 + .../filebeat/filebeat-configmap.yaml | 1 + dev-tools/kubernetes/Tiltfile | 20 +- .../kubernetes/filebeat/manifest.debug.yaml | 232 +++++++-------- .../kubernetes/filebeat/manifest.run.yaml | 232 +++++++-------- .../metricbeat/manifest.debug.multi.yaml | 263 +++++++++--------- .../kubernetes/metricbeat/manifest.debug.yaml | 263 +++++++++--------- .../kubernetes/metricbeat/manifest.run.yaml | 263 +++++++++--------- 8 files changed, 668 insertions(+), 607 deletions(-) diff --git a/deploy/kubernetes/filebeat-kubernetes.yaml b/deploy/kubernetes/filebeat-kubernetes.yaml index c9015c0e1473..6c365ced4cb9 100644 --- a/deploy/kubernetes/filebeat-kubernetes.yaml +++ b/deploy/kubernetes/filebeat-kubernetes.yaml @@ -113,6 +113,7 @@ data: filebeat.yml: |- filebeat.inputs: - type: filestream + id: kubernetes-container-logs-${data.kubernetes.pod.name}-${data.kubernetes.container.id} paths: - /var/log/containers/*.log parsers: diff --git a/deploy/kubernetes/filebeat/filebeat-configmap.yaml b/deploy/kubernetes/filebeat/filebeat-configmap.yaml index f2614e8c035b..8c2fb6603a48 100644 --- a/deploy/kubernetes/filebeat/filebeat-configmap.yaml +++ b/deploy/kubernetes/filebeat/filebeat-configmap.yaml @@ -9,6 +9,7 @@ data: filebeat.yml: |- filebeat.inputs: - type: filestream + id: kubernetes-container-logs-${data.kubernetes.pod.name}-${data.kubernetes.container.id} paths: - /var/log/containers/*.log parsers: diff --git a/dev-tools/kubernetes/Tiltfile b/dev-tools/kubernetes/Tiltfile index 0a373ceb05fa..5ef4217b849a 100644 --- a/dev-tools/kubernetes/Tiltfile +++ b/dev-tools/kubernetes/Tiltfile @@ -137,7 +137,7 @@ def k8s_expose( # 
`beat`: `metricbeat` to test Metricbeat, `filebeat` to test Filebeat # `mode`: `debug` to start a remote debugger that you can connect to from your IDE with hot reloading enabled, `run` to just run Metricbeat without a debugger but still with hot reloading enabled # `arch`: `amd64` to build go binary for amd64 architecture, `arm64` to build go binary for arm64 (aka M1 Apple chip) architecture -# `k8s_env`: `kind` to run against a Kind cluster with no docker repo, `gcp` to use a docker repo on GCP +# `k8s_env`: `kind` to run against a Kind cluster with no docker repo, `gcp` to use a docker repo on GCP, `aws` to use a docker repo on AWS # `k8s_cluster`: `single` to use a single node k8s cluster, `multi` to use a k8s with more than 1 node. # if running on a multi-node cluster we expect to have at least 2 workers and a control plane node. One of the workers (eg. worker1) # should have a taint and a label (for node affinity) to make sure that only the debugger runs on that node. You need to run the following commands: @@ -203,10 +203,20 @@ def beat( k8s_expose(beat=beat, mode=mode, k8s_cluster=k8s_cluster) +# Note: Select only one of the following examples or modify one with the parameters you want to use + +# Run metricbeat in run mode against a single node k8s cluster with a docker repo on AWS +# beat(beat="metricbeat", +# mode="run", +# arch="amd64", +# k8s_env="aws", +# k8s_cluster="single", +# ) + +# Run on Mac M1 against a single node k8s cluster beat(beat="metricbeat", - # mode="debug", - mode="run", - arch="amd64", - k8s_env="aws", + mode="debug", + arch="arm64", + k8s_env="kind", k8s_cluster="single", ) diff --git a/dev-tools/kubernetes/filebeat/manifest.debug.yaml b/dev-tools/kubernetes/filebeat/manifest.debug.yaml index 36600e5bf5cf..36fc03bc559c 100644 --- a/dev-tools/kubernetes/filebeat/manifest.debug.yaml +++ b/dev-tools/kubernetes/filebeat/manifest.debug.yaml @@ -1,3 +1,106 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: filebeat + namespace: 
kube-system + labels: + k8s-app: filebeat +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: filebeat + labels: + k8s-app: filebeat +rules: +- apiGroups: [""] # "" indicates the core API group + resources: + - namespaces + - pods + - nodes + verbs: + - get + - watch + - list +- apiGroups: ["apps"] + resources: + - replicasets + verbs: ["get", "list", "watch"] +- apiGroups: ["batch"] + resources: + - jobs + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: filebeat + # should be the namespace where filebeat is running + namespace: kube-system + labels: + k8s-app: filebeat +rules: + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: ["get", "create", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: filebeat-kubeadm-config + namespace: kube-system + labels: + k8s-app: filebeat +rules: + - apiGroups: [""] + resources: + - configmaps + resourceNames: + - kubeadm-config + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: filebeat +subjects: +- kind: ServiceAccount + name: filebeat + namespace: kube-system +roleRef: + kind: ClusterRole + name: filebeat + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: filebeat + namespace: kube-system +subjects: + - kind: ServiceAccount + name: filebeat + namespace: kube-system +roleRef: + kind: Role + name: filebeat + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: filebeat-kubeadm-config + namespace: kube-system +subjects: + - kind: ServiceAccount + name: filebeat + namespace: kube-system +roleRef: + kind: Role + name: filebeat-kubeadm-config + apiGroup: rbac.authorization.k8s.io --- apiVersion: v1 kind: ConfigMap @@ -9,9 +112,17 @@ metadata: data: filebeat.yml: |- filebeat.inputs: - - 
type: container + - type: filestream + id: kubernetes-container-logs-${data.kubernetes.pod.name}-${data.kubernetes.container.id} paths: - /var/log/containers/*.log + parsers: + - container: ~ + prospector: + scanner: + fingerprint.enabled: true + symlinks: true + file_identity.fingerprint: ~ processors: - add_kubernetes_metadata: host: ${NODE_NAME} @@ -20,15 +131,23 @@ data: logs_path: "/var/log/containers/" # To enable hints based autodiscover, remove `filebeat.inputs` configuration and uncomment this: - #filebeat.autodiscover: + # filebeat.autodiscover: # providers: # - type: kubernetes # node: ${NODE_NAME} # hints.enabled: true # hints.default_config: - # type: container + # type: filestream + # id: kubernetes-container-logs-${data.kubernetes.pod.name}-${data.kubernetes.container.id} # paths: - # - /var/log/containers/*${data.kubernetes.container.id}.log + # - /var/log/containers/*-${data.kubernetes.container.id}.log + # parsers: + # - container: ~ + # prospector: + # scanner: + # fingerprint.enabled: true + # symlinks: true + # file_identity.fingerprint: ~ processors: - add_cloud_metadata: @@ -71,7 +190,6 @@ spec: args: [ "-c", "/etc/filebeat.yml", "-e", - "-system.hostfs=/hostfs", ] ports: - containerPort: 56268 @@ -136,107 +254,3 @@ spec: path: /var/lib/filebeat-data type: DirectoryOrCreate --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: filebeat -subjects: -- kind: ServiceAccount - name: filebeat - namespace: kube-system -roleRef: - kind: ClusterRole - name: filebeat - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: filebeat - namespace: kube-system -subjects: - - kind: ServiceAccount - name: filebeat - namespace: kube-system -roleRef: - kind: Role - name: filebeat - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: filebeat-kubeadm-config - namespace: kube-system -subjects: 
- - kind: ServiceAccount - name: filebeat - namespace: kube-system -roleRef: - kind: Role - name: filebeat-kubeadm-config - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: filebeat - labels: - k8s-app: filebeat -rules: -- apiGroups: [""] # "" indicates the core API group - resources: - - namespaces - - pods - - nodes - verbs: - - get - - watch - - list -- apiGroups: ["apps"] - resources: - - replicasets - verbs: ["get", "list", "watch"] -- apiGroups: ["batch"] - resources: - - jobs - verbs: ["get", "list", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: filebeat - # should be the namespace where filebeat is running - namespace: kube-system - labels: - k8s-app: filebeat -rules: - - apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: ["get", "create", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: filebeat-kubeadm-config - namespace: kube-system - labels: - k8s-app: filebeat -rules: - - apiGroups: [""] - resources: - - configmaps - resourceNames: - - kubeadm-config - verbs: ["get"] ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: filebeat - namespace: kube-system - labels: - k8s-app: filebeat ---- diff --git a/dev-tools/kubernetes/filebeat/manifest.run.yaml b/dev-tools/kubernetes/filebeat/manifest.run.yaml index 70e4612aee1e..2263bdd77e67 100644 --- a/dev-tools/kubernetes/filebeat/manifest.run.yaml +++ b/dev-tools/kubernetes/filebeat/manifest.run.yaml @@ -1,3 +1,106 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: filebeat + namespace: kube-system + labels: + k8s-app: filebeat +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: filebeat + labels: + k8s-app: filebeat +rules: +- apiGroups: [""] # "" indicates the core API group + resources: + - namespaces + - pods + - nodes + verbs: + - get + - watch + - list +- apiGroups: ["apps"] + resources: + 
- replicasets + verbs: ["get", "list", "watch"] +- apiGroups: ["batch"] + resources: + - jobs + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: filebeat + # should be the namespace where filebeat is running + namespace: kube-system + labels: + k8s-app: filebeat +rules: + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: ["get", "create", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: filebeat-kubeadm-config + namespace: kube-system + labels: + k8s-app: filebeat +rules: + - apiGroups: [""] + resources: + - configmaps + resourceNames: + - kubeadm-config + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: filebeat +subjects: +- kind: ServiceAccount + name: filebeat + namespace: kube-system +roleRef: + kind: ClusterRole + name: filebeat + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: filebeat + namespace: kube-system +subjects: + - kind: ServiceAccount + name: filebeat + namespace: kube-system +roleRef: + kind: Role + name: filebeat + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: filebeat-kubeadm-config + namespace: kube-system +subjects: + - kind: ServiceAccount + name: filebeat + namespace: kube-system +roleRef: + kind: Role + name: filebeat-kubeadm-config + apiGroup: rbac.authorization.k8s.io --- apiVersion: v1 kind: ConfigMap @@ -9,9 +112,17 @@ metadata: data: filebeat.yml: |- filebeat.inputs: - - type: container + - type: filestream + id: kubernetes-container-logs-${data.kubernetes.pod.name}-${data.kubernetes.container.id} paths: - /var/log/containers/*.log + parsers: + - container: ~ + prospector: + scanner: + fingerprint.enabled: true + symlinks: true + file_identity.fingerprint: ~ processors: - add_kubernetes_metadata: host: ${NODE_NAME} 
@@ -20,15 +131,23 @@ data: logs_path: "/var/log/containers/" # To enable hints based autodiscover, remove `filebeat.inputs` configuration and uncomment this: - #filebeat.autodiscover: + # filebeat.autodiscover: # providers: # - type: kubernetes # node: ${NODE_NAME} # hints.enabled: true # hints.default_config: - # type: container + # type: filestream + # id: kubernetes-container-logs-${data.kubernetes.pod.name}-${data.kubernetes.container.id} # paths: - # - /var/log/containers/*${data.kubernetes.container.id}.log + # - /var/log/containers/*-${data.kubernetes.container.id}.log + # parsers: + # - container: ~ + # prospector: + # scanner: + # fingerprint.enabled: true + # symlinks: true + # file_identity.fingerprint: ~ processors: - add_cloud_metadata: @@ -71,7 +190,6 @@ spec: args: [ "-c", "/etc/filebeat.yml", "-e", - "-system.hostfs=/hostfs", ] env: - name: ELASTICSEARCH_HOST @@ -131,107 +249,3 @@ spec: path: /var/lib/filebeat-data type: DirectoryOrCreate --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: filebeat -subjects: -- kind: ServiceAccount - name: filebeat - namespace: kube-system -roleRef: - kind: ClusterRole - name: filebeat - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: filebeat - namespace: kube-system -subjects: - - kind: ServiceAccount - name: filebeat - namespace: kube-system -roleRef: - kind: Role - name: filebeat - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: filebeat-kubeadm-config - namespace: kube-system -subjects: - - kind: ServiceAccount - name: filebeat - namespace: kube-system -roleRef: - kind: Role - name: filebeat-kubeadm-config - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: filebeat - labels: - k8s-app: filebeat -rules: -- apiGroups: [""] # "" indicates the core API group - 
resources: - - namespaces - - pods - - nodes - verbs: - - get - - watch - - list -- apiGroups: ["apps"] - resources: - - replicasets - verbs: ["get", "list", "watch"] -- apiGroups: ["batch"] - resources: - - jobs - verbs: ["get", "list", "watch"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: filebeat - # should be the namespace where filebeat is running - namespace: kube-system - labels: - k8s-app: filebeat -rules: - - apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: ["get", "create", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: filebeat-kubeadm-config - namespace: kube-system - labels: - k8s-app: filebeat -rules: - - apiGroups: [""] - resources: - - configmaps - resourceNames: - - kubeadm-config - verbs: ["get"] ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: filebeat - namespace: kube-system - labels: - k8s-app: filebeat ---- diff --git a/dev-tools/kubernetes/metricbeat/manifest.debug.multi.yaml b/dev-tools/kubernetes/metricbeat/manifest.debug.multi.yaml index 12f51a2a500d..6dd492804c8b 100644 --- a/dev-tools/kubernetes/metricbeat/manifest.debug.multi.yaml +++ b/dev-tools/kubernetes/metricbeat/manifest.debug.multi.yaml @@ -1,3 +1,134 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: metricbeat + namespace: kube-system + labels: + k8s-app: metricbeat +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metricbeat + labels: + k8s-app: metricbeat +rules: +- apiGroups: [""] + resources: + - nodes + - namespaces + - events + - pods + - services + - persistentvolumes + - persistentvolumeclaims + verbs: ["get", "list", "watch"] +# Enable this rule only if planing to use Kubernetes keystore +#- apiGroups: [""] +# resources: +# - secrets +# verbs: ["get"] +- apiGroups: ["extensions"] + resources: + - replicasets + verbs: ["get", "list", "watch"] +- apiGroups: ["apps"] + resources: + - statefulsets + - deployments + - 
replicasets + - daemonsets + verbs: ["get", "list", "watch"] +- apiGroups: ["batch"] + resources: + - jobs + - cronjobs + verbs: ["get", "list", "watch"] +- apiGroups: ["storage.k8s.io"] + resources: + - storageclasses + verbs: ["get", "list", "watch"] +- apiGroups: + - "" + resources: + - nodes/stats + verbs: + - get +- nonResourceURLs: + - "/metrics" + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: metricbeat + # should be the namespace where metricbeat is running + namespace: kube-system + labels: + k8s-app: metricbeat +rules: + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: ["get", "create", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: metricbeat-kubeadm-config + namespace: kube-system + labels: + k8s-app: metricbeat +rules: + - apiGroups: [""] + resources: + - configmaps + resourceNames: + - kubeadm-config + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metricbeat +subjects: +- kind: ServiceAccount + name: metricbeat + namespace: kube-system +roleRef: + kind: ClusterRole + name: metricbeat + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: metricbeat + namespace: kube-system +subjects: + - kind: ServiceAccount + name: metricbeat + namespace: kube-system +roleRef: + kind: Role + name: metricbeat + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: metricbeat-kubeadm-config + namespace: kube-system +subjects: + - kind: ServiceAccount + name: metricbeat + namespace: kube-system +roleRef: + kind: Role + name: metricbeat-kubeadm-config + apiGroup: rbac.authorization.k8s.io --- apiVersion: v1 kind: ConfigMap @@ -30,6 +161,7 @@ data: period: 10s add_metadata: true metricsets: + - state_namespace - state_node - state_deployment - state_daemonset @@ -41,6 
+173,9 @@ data: - state_resourcequota - state_statefulset - state_service + - state_persistentvolume + - state_persistentvolumeclaim + - state_storageclass # If `https` is used to access `kube-state-metrics`, uncomment following settings: # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token # ssl.certificate_authorities: @@ -337,131 +472,3 @@ spec: path: /var/lib/metricbeat-data type: DirectoryOrCreate --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: metricbeat -subjects: -- kind: ServiceAccount - name: metricbeat - namespace: kube-system -roleRef: - kind: ClusterRole - name: metricbeat - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: metricbeat - namespace: kube-system -subjects: - - kind: ServiceAccount - name: metricbeat - namespace: kube-system -roleRef: - kind: Role - name: metricbeat - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: metricbeat-kubeadm-config - namespace: kube-system -subjects: - - kind: ServiceAccount - name: metricbeat - namespace: kube-system -roleRef: - kind: Role - name: metricbeat-kubeadm-config - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: metricbeat - labels: - k8s-app: metricbeat -rules: -- apiGroups: [""] - resources: - - nodes - - namespaces - - events - - pods - - services - - persistentvolumes - - persistentvolumeclaims - verbs: ["get", "list", "watch"] -# Enable this rule only if planing to use Kubernetes keystore -#- apiGroups: [""] -# resources: -# - secrets -# verbs: ["get"] -- apiGroups: ["extensions"] - resources: - - replicasets - verbs: ["get", "list", "watch"] -- apiGroups: ["apps"] - resources: - - statefulsets - - deployments - - replicasets - - daemonsets - verbs: ["get", "list", "watch"] -- apiGroups: ["batch"] - resources: - - jobs - - 
cronjobs - verbs: ["get", "list", "watch"] -- apiGroups: - - "" - resources: - - nodes/stats - verbs: - - get -- nonResourceURLs: - - "/metrics" - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: metricbeat - # should be the namespace where metricbeat is running - namespace: kube-system - labels: - k8s-app: metricbeat -rules: - - apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: ["get", "create", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: metricbeat-kubeadm-config - namespace: kube-system - labels: - k8s-app: metricbeat -rules: - - apiGroups: [""] - resources: - - configmaps - resourceNames: - - kubeadm-config - verbs: ["get"] ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: metricbeat - namespace: kube-system - labels: - k8s-app: metricbeat ---- diff --git a/dev-tools/kubernetes/metricbeat/manifest.debug.yaml b/dev-tools/kubernetes/metricbeat/manifest.debug.yaml index 7e7d6e8f2ad1..398d7fa85606 100644 --- a/dev-tools/kubernetes/metricbeat/manifest.debug.yaml +++ b/dev-tools/kubernetes/metricbeat/manifest.debug.yaml @@ -1,3 +1,134 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: metricbeat + namespace: kube-system + labels: + k8s-app: metricbeat +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metricbeat + labels: + k8s-app: metricbeat +rules: +- apiGroups: [""] + resources: + - nodes + - namespaces + - events + - pods + - services + - persistentvolumes + - persistentvolumeclaims + verbs: ["get", "list", "watch"] +# Enable this rule only if planing to use Kubernetes keystore +#- apiGroups: [""] +# resources: +# - secrets +# verbs: ["get"] +- apiGroups: ["extensions"] + resources: + - replicasets + verbs: ["get", "list", "watch"] +- apiGroups: ["apps"] + resources: + - statefulsets + - deployments + - replicasets + - daemonsets + verbs: ["get", "list", "watch"] +- apiGroups: ["batch"] + resources: + - 
jobs + - cronjobs + verbs: ["get", "list", "watch"] +- apiGroups: ["storage.k8s.io"] + resources: + - storageclasses + verbs: ["get", "list", "watch"] +- apiGroups: + - "" + resources: + - nodes/stats + verbs: + - get +- nonResourceURLs: + - "/metrics" + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: metricbeat + # should be the namespace where metricbeat is running + namespace: kube-system + labels: + k8s-app: metricbeat +rules: + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: ["get", "create", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: metricbeat-kubeadm-config + namespace: kube-system + labels: + k8s-app: metricbeat +rules: + - apiGroups: [""] + resources: + - configmaps + resourceNames: + - kubeadm-config + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metricbeat +subjects: +- kind: ServiceAccount + name: metricbeat + namespace: kube-system +roleRef: + kind: ClusterRole + name: metricbeat + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: metricbeat + namespace: kube-system +subjects: + - kind: ServiceAccount + name: metricbeat + namespace: kube-system +roleRef: + kind: Role + name: metricbeat + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: metricbeat-kubeadm-config + namespace: kube-system +subjects: + - kind: ServiceAccount + name: metricbeat + namespace: kube-system +roleRef: + kind: Role + name: metricbeat-kubeadm-config + apiGroup: rbac.authorization.k8s.io --- apiVersion: v1 kind: ConfigMap @@ -30,6 +161,7 @@ data: period: 10s add_metadata: true metricsets: + - state_namespace - state_node - state_deployment - state_daemonset @@ -41,6 +173,9 @@ data: - state_resourcequota - state_statefulset - state_service + - state_persistentvolume + - 
state_persistentvolumeclaim + - state_storageclass # If `https` is used to access `kube-state-metrics`, uncomment following settings: # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token # ssl.certificate_authorities: @@ -232,131 +367,3 @@ spec: path: /var/lib/metricbeat-data type: DirectoryOrCreate --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: metricbeat -subjects: -- kind: ServiceAccount - name: metricbeat - namespace: kube-system -roleRef: - kind: ClusterRole - name: metricbeat - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: metricbeat - namespace: kube-system -subjects: - - kind: ServiceAccount - name: metricbeat - namespace: kube-system -roleRef: - kind: Role - name: metricbeat - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: metricbeat-kubeadm-config - namespace: kube-system -subjects: - - kind: ServiceAccount - name: metricbeat - namespace: kube-system -roleRef: - kind: Role - name: metricbeat-kubeadm-config - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: metricbeat - labels: - k8s-app: metricbeat -rules: -- apiGroups: [""] - resources: - - nodes - - namespaces - - events - - pods - - services - - persistentvolumes - - persistentvolumeclaims - verbs: ["get", "list", "watch"] -# Enable this rule only if planing to use Kubernetes keystore -#- apiGroups: [""] -# resources: -# - secrets -# verbs: ["get"] -- apiGroups: ["extensions"] - resources: - - replicasets - verbs: ["get", "list", "watch"] -- apiGroups: ["apps"] - resources: - - statefulsets - - deployments - - replicasets - - daemonsets - verbs: ["get", "list", "watch"] -- apiGroups: ["batch"] - resources: - - jobs - - cronjobs - verbs: ["get", "list", "watch"] -- apiGroups: - - "" - resources: - - nodes/stats - verbs: - - 
get -- nonResourceURLs: - - "/metrics" - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: metricbeat - # should be the namespace where metricbeat is running - namespace: kube-system - labels: - k8s-app: metricbeat -rules: - - apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: ["get", "create", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: metricbeat-kubeadm-config - namespace: kube-system - labels: - k8s-app: metricbeat -rules: - - apiGroups: [""] - resources: - - configmaps - resourceNames: - - kubeadm-config - verbs: ["get"] ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: metricbeat - namespace: kube-system - labels: - k8s-app: metricbeat ---- diff --git a/dev-tools/kubernetes/metricbeat/manifest.run.yaml b/dev-tools/kubernetes/metricbeat/manifest.run.yaml index 883b44862489..21c9727d45ef 100644 --- a/dev-tools/kubernetes/metricbeat/manifest.run.yaml +++ b/dev-tools/kubernetes/metricbeat/manifest.run.yaml @@ -1,3 +1,134 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: metricbeat + namespace: kube-system + labels: + k8s-app: metricbeat +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metricbeat + labels: + k8s-app: metricbeat +rules: +- apiGroups: [""] + resources: + - nodes + - namespaces + - events + - pods + - services + - persistentvolumes + - persistentvolumeclaims + verbs: ["get", "list", "watch"] +# Enable this rule only if planing to use Kubernetes keystore +#- apiGroups: [""] +# resources: +# - secrets +# verbs: ["get"] +- apiGroups: ["extensions"] + resources: + - replicasets + verbs: ["get", "list", "watch"] +- apiGroups: ["apps"] + resources: + - statefulsets + - deployments + - replicasets + - daemonsets + verbs: ["get", "list", "watch"] +- apiGroups: ["batch"] + resources: + - jobs + - cronjobs + verbs: ["get", "list", "watch"] +- apiGroups: ["storage.k8s.io"] + resources: + - 
storageclasses + verbs: ["get", "list", "watch"] +- apiGroups: + - "" + resources: + - nodes/stats + verbs: + - get +- nonResourceURLs: + - "/metrics" + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: metricbeat + # should be the namespace where metricbeat is running + namespace: kube-system + labels: + k8s-app: metricbeat +rules: + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: ["get", "create", "update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: metricbeat-kubeadm-config + namespace: kube-system + labels: + k8s-app: metricbeat +rules: + - apiGroups: [""] + resources: + - configmaps + resourceNames: + - kubeadm-config + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metricbeat +subjects: +- kind: ServiceAccount + name: metricbeat + namespace: kube-system +roleRef: + kind: ClusterRole + name: metricbeat + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: metricbeat + namespace: kube-system +subjects: + - kind: ServiceAccount + name: metricbeat + namespace: kube-system +roleRef: + kind: Role + name: metricbeat + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: metricbeat-kubeadm-config + namespace: kube-system +subjects: + - kind: ServiceAccount + name: metricbeat + namespace: kube-system +roleRef: + kind: Role + name: metricbeat-kubeadm-config + apiGroup: rbac.authorization.k8s.io --- apiVersion: v1 kind: ConfigMap @@ -30,6 +161,7 @@ data: period: 10s add_metadata: true metricsets: + - state_namespace - state_node - state_deployment - state_daemonset @@ -41,6 +173,9 @@ data: - state_resourcequota - state_statefulset - state_service + - state_persistentvolume + - state_persistentvolumeclaim + - state_storageclass # If `https` is used to access `kube-state-metrics`, 
uncomment following settings: # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token # ssl.certificate_authorities: @@ -227,131 +362,3 @@ spec: path: /var/lib/metricbeat-data type: DirectoryOrCreate --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: metricbeat -subjects: -- kind: ServiceAccount - name: metricbeat - namespace: kube-system -roleRef: - kind: ClusterRole - name: metricbeat - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: metricbeat - namespace: kube-system -subjects: - - kind: ServiceAccount - name: metricbeat - namespace: kube-system -roleRef: - kind: Role - name: metricbeat - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: metricbeat-kubeadm-config - namespace: kube-system -subjects: - - kind: ServiceAccount - name: metricbeat - namespace: kube-system -roleRef: - kind: Role - name: metricbeat-kubeadm-config - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: metricbeat - labels: - k8s-app: metricbeat -rules: -- apiGroups: [""] - resources: - - nodes - - namespaces - - events - - pods - - services - - persistentvolumes - - persistentvolumeclaims - verbs: ["get", "list", "watch"] -# Enable this rule only if planing to use Kubernetes keystore -#- apiGroups: [""] -# resources: -# - secrets -# verbs: ["get"] -- apiGroups: ["extensions"] - resources: - - replicasets - verbs: ["get", "list", "watch"] -- apiGroups: ["apps"] - resources: - - statefulsets - - deployments - - replicasets - - daemonsets - verbs: ["get", "list", "watch"] -- apiGroups: ["batch"] - resources: - - jobs - - cronjobs - verbs: ["get", "list", "watch"] -- apiGroups: - - "" - resources: - - nodes/stats - verbs: - - get -- nonResourceURLs: - - "/metrics" - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 
-kind: Role -metadata: - name: metricbeat - # should be the namespace where metricbeat is running - namespace: kube-system - labels: - k8s-app: metricbeat -rules: - - apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: ["get", "create", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: metricbeat-kubeadm-config - namespace: kube-system - labels: - k8s-app: metricbeat -rules: - - apiGroups: [""] - resources: - - configmaps - resourceNames: - - kubeadm-config - verbs: ["get"] ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: metricbeat - namespace: kube-system - labels: - k8s-app: metricbeat ---- From 1f88ea8d05e96b4a8eb0ef21d3b298371e0a00ca Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 22 Dec 2023 12:32:14 -0500 Subject: [PATCH 015/129] chore: Update snapshot.yml (#37490) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 09e1294b17b5..525657c20fd1 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-yufkxnwm-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-vqaxdghw-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-yufkxnwm-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-vqaxdghw-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", 
"http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-yufkxnwm-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-vqaxdghw-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 8a20bd2b137ae2825284c245d7f9a407eb33cfbc Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 22 Dec 2023 15:12:04 -0500 Subject: [PATCH 016/129] [Automation] Bump Golang version to 1.20.12 (#37350) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update stan Dockerfile Made with ❤️️ by updatecli * chore: Update Auditbeat Dockerfile Made with ❤️️ by updatecli * chore: Update .go-version Made with ❤️️ by updatecli * chore: Update Filebeat debug Dockerfile Made with ❤️️ by updatecli * chore: Update .golangci.yml Made with ❤️️ by updatecli * chore: Update Functionbeat Dockerfile Made with ❤️️ by updatecli * chore: Update Heartbeat debug Dockerfile Made with ❤️️ by updatecli * chore: Update NATS module Dockerfile Made with ❤️️ by updatecli * chore: Update Packetbeat Dockerfile Made with ❤️️ by updatecli * chore: Update from vsphere Dockerfile Made with ❤️️ by updatecli * chore: Update Metricbeat debug Dockerfile Made with ❤️️ by updatecli * chore: Update version.asciidoc Made with ❤️️ by updatecli * chore: Update Heartbeat Dockerfile Made with ❤️️ by updatecli * chore: Update Metricbeat Dockerfile Made with ❤️️ by updatecli * chore: Update HTTP module Dockerfile Made with ❤️️ by updatecli * Update changelog. 
--------- Co-authored-by: apmmachine Co-authored-by: Craig MacKenzie Co-authored-by: Denis --- .go-version | 2 +- .golangci.yml | 8 ++++---- CHANGELOG.next.asciidoc | 2 +- auditbeat/Dockerfile | 2 +- dev-tools/kubernetes/filebeat/Dockerfile.debug | 2 +- dev-tools/kubernetes/heartbeat/Dockerfile.debug | 2 +- dev-tools/kubernetes/metricbeat/Dockerfile.debug | 2 +- heartbeat/Dockerfile | 2 +- libbeat/docs/version.asciidoc | 2 +- metricbeat/Dockerfile | 2 +- metricbeat/module/http/_meta/Dockerfile | 2 +- metricbeat/module/nats/_meta/Dockerfile | 2 +- metricbeat/module/vsphere/_meta/Dockerfile | 2 +- packetbeat/Dockerfile | 2 +- x-pack/functionbeat/Dockerfile | 2 +- x-pack/metricbeat/module/stan/_meta/Dockerfile | 2 +- 16 files changed, 19 insertions(+), 19 deletions(-) diff --git a/.go-version b/.go-version index 4bb1a22f8ec5..3b9e4a0c187a 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.20.11 +1.20.12 diff --git a/.golangci.yml b/.golangci.yml index b4c16d3291da..3cc6336695e3 100755 --- a/.golangci.yml +++ b/.golangci.yml @@ -114,7 +114,7 @@ linters-settings: gosimple: # Select the Go version to target. The default is '1.13'. - go: "1.20.11" + go: "1.20.12" nakedret: # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 @@ -132,19 +132,19 @@ linters-settings: staticcheck: # Select the Go version to target. The default is '1.13'. - go: "1.20.11" + go: "1.20.12" checks: ["all"] stylecheck: # Select the Go version to target. The default is '1.13'. - go: "1.20.11" + go: "1.20.12" # Disabled: # ST1005: error strings should not be capitalized checks: ["all", "-ST1005"] unused: # Select the Go version to target. The default is '1.13'. 
- go: "1.20.11" + go: "1.20.12" gosec: excludes: diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 172fbf334d2a..284ebac1eeb2 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -148,7 +148,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - elasticsearch output now supports `idle_connection_timeout`. {issue}35616[35615] {pull}36843[36843] - Upgrade golang/x/net to v0.17.0. Updates the publicsuffix table used by the registered_domain processor. {pull}36969[36969] Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will disable the netinfo.enabled option of add_host_metadata processor -- Upgrade to Go 1.20.11. {pull}37123[37123] +- Upgrade to Go 1.20.12. {pull}37350[37350] - The Elasticsearch output can now configure performance presets with the `preset` configuration field. {pull}37259[37259] *Auditbeat* diff --git a/auditbeat/Dockerfile b/auditbeat/Dockerfile index 4f7612cd90e3..742041d66af8 100644 --- a/auditbeat/Dockerfile +++ b/auditbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.11 +FROM golang:1.20.12 RUN \ apt-get update \ diff --git a/dev-tools/kubernetes/filebeat/Dockerfile.debug b/dev-tools/kubernetes/filebeat/Dockerfile.debug index 61e036848474..c0c8768861cc 100644 --- a/dev-tools/kubernetes/filebeat/Dockerfile.debug +++ b/dev-tools/kubernetes/filebeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.20.11 as builder +FROM golang:1.20.12 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/dev-tools/kubernetes/heartbeat/Dockerfile.debug b/dev-tools/kubernetes/heartbeat/Dockerfile.debug index df065094897e..aa48a8d58d7e 100644 --- a/dev-tools/kubernetes/heartbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/heartbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.20.11 as builder +FROM golang:1.20.12 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git 
a/dev-tools/kubernetes/metricbeat/Dockerfile.debug b/dev-tools/kubernetes/metricbeat/Dockerfile.debug index 241c186998d1..854cf1ac32be 100644 --- a/dev-tools/kubernetes/metricbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/metricbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.20.11 as builder +FROM golang:1.20.12 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/heartbeat/Dockerfile b/heartbeat/Dockerfile index d90185df60bd..e3e4a8bf989c 100644 --- a/heartbeat/Dockerfile +++ b/heartbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.11 +FROM golang:1.20.12 RUN \ apt-get update \ diff --git a/libbeat/docs/version.asciidoc b/libbeat/docs/version.asciidoc index 6d2c70722207..ccb198c4a924 100644 --- a/libbeat/docs/version.asciidoc +++ b/libbeat/docs/version.asciidoc @@ -1,6 +1,6 @@ :stack-version: 8.13.0 :doc-branch: main -:go-version: 1.20.11 +:go-version: 1.20.12 :release-state: unreleased :python: 3.7 :docker: 1.12 diff --git a/metricbeat/Dockerfile b/metricbeat/Dockerfile index 7f3b50109d56..634db34245b7 100644 --- a/metricbeat/Dockerfile +++ b/metricbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.11 +FROM golang:1.20.12 RUN \ apt update \ diff --git a/metricbeat/module/http/_meta/Dockerfile b/metricbeat/module/http/_meta/Dockerfile index 74c1fdb0bca5..bf7a9fc931ec 100644 --- a/metricbeat/module/http/_meta/Dockerfile +++ b/metricbeat/module/http/_meta/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.11 +FROM golang:1.20.12 COPY test/main.go main.go diff --git a/metricbeat/module/nats/_meta/Dockerfile b/metricbeat/module/nats/_meta/Dockerfile index 0340387144c0..44fdc695748e 100644 --- a/metricbeat/module/nats/_meta/Dockerfile +++ b/metricbeat/module/nats/_meta/Dockerfile @@ -2,7 +2,7 @@ ARG NATS_VERSION=2.0.4 FROM nats:$NATS_VERSION # build stage -FROM golang:1.20.11 AS build-env +FROM golang:1.20.12 AS build-env RUN apt-get install git mercurial gcc RUN git clone https://github.com/nats-io/nats.go.git 
/nats-go RUN cd /nats-go/examples/nats-bench && git checkout tags/v1.10.0 && go build . diff --git a/metricbeat/module/vsphere/_meta/Dockerfile b/metricbeat/module/vsphere/_meta/Dockerfile index 05e0eaf3c74c..993137b89f3a 100644 --- a/metricbeat/module/vsphere/_meta/Dockerfile +++ b/metricbeat/module/vsphere/_meta/Dockerfile @@ -1,5 +1,5 @@ ARG VSPHERE_GOLANG_VERSION -FROM golang:1.20.11 +FROM golang:1.20.12 RUN apt-get install curl git RUN go install github.com/vmware/govmomi/vcsim@v0.30.4 diff --git a/packetbeat/Dockerfile b/packetbeat/Dockerfile index 2040495b5510..fe13ef47b61f 100644 --- a/packetbeat/Dockerfile +++ b/packetbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.11 +FROM golang:1.20.12 RUN \ apt-get update \ diff --git a/x-pack/functionbeat/Dockerfile b/x-pack/functionbeat/Dockerfile index bd68544accce..662b27d669db 100644 --- a/x-pack/functionbeat/Dockerfile +++ b/x-pack/functionbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.11 +FROM golang:1.20.12 RUN \ apt-get update \ diff --git a/x-pack/metricbeat/module/stan/_meta/Dockerfile b/x-pack/metricbeat/module/stan/_meta/Dockerfile index 20604392f603..92ee1d834571 100644 --- a/x-pack/metricbeat/module/stan/_meta/Dockerfile +++ b/x-pack/metricbeat/module/stan/_meta/Dockerfile @@ -2,7 +2,7 @@ ARG STAN_VERSION=0.15.1 FROM nats-streaming:$STAN_VERSION # build stage -FROM golang:1.20.11 AS build-env +FROM golang:1.20.12 AS build-env RUN apt-get install git mercurial gcc RUN git clone https://github.com/nats-io/stan.go.git /stan-go RUN cd /stan-go/examples/stan-bench && git checkout tags/v0.5.2 && go build . 
From 8c4a40fdbfda1a8e5ff2ceae01930a9f3e629d97 Mon Sep 17 00:00:00 2001 From: Tetiana Kravchenko Date: Wed, 27 Dec 2023 09:54:00 +0100 Subject: [PATCH 017/129] Upgrade elastic-agent-autodiscover dependency; use InitDefaults() to init default configuration (#37458) * use InitDefaults(); elastic-agent-autodiscover: fix default resource metadata config - https://github.com/elastic/elastic-agent-autodiscover/pull/72 Signed-off-by: Tetiana Kravchenko * revert unintended changes Signed-off-by: Tetiana Kravchenko * upgrade elastic-agent-autodiscover lib to v0.6.6 Signed-off-by: Tetiana Kravchenko * run make update; fix linter issues Signed-off-by: Tetiana Kravchenko * revert changes related to the 'commonMetaConfig'; add missing metadata configuration on the metricset level Signed-off-by: Tetiana Kravchenko * x-pack/metricbeat: run make update Signed-off-by: Tetiana Kravchenko --------- Signed-off-by: Tetiana Kravchenko --- NOTICE.txt | 4 ++-- go.mod | 4 +--- go.sum | 4 ++-- .../add_kubernetes_metadata/config.go | 18 +++++++-------- .../add_kubernetes_metadata/config_test.go | 10 ++++---- .../add_kubernetes_metadata/kubernetes.go | 3 +-- metricbeat/docs/modules/kubernetes.asciidoc | 23 +++++++++++++++---- metricbeat/metricbeat.reference.yml | 23 +++++++++++++++---- .../kubernetes/_meta/config.reference.yml | 23 +++++++++++++++---- metricbeat/module/kubernetes/_meta/config.yml | 12 +++++++--- .../module/kubernetes/util/kubernetes.go | 20 ++++++++-------- metricbeat/modules.d/kubernetes.yml.disabled | 12 +++++++--- x-pack/metricbeat/metricbeat.reference.yml | 23 +++++++++++++++---- 13 files changed, 124 insertions(+), 55 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index cca066349fa3..6e60972d6745 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -12268,11 +12268,11 @@ SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-autodiscover -Version: v0.6.5 +Version: v0.6.6 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.6.5/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.6.6/LICENSE: Apache License Version 2.0, January 2004 diff --git a/go.mod b/go.mod index 6732fbc60061..e4943105c6a4 100644 --- a/go.mod +++ b/go.mod @@ -200,7 +200,7 @@ require ( github.com/aws/smithy-go v1.13.5 github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5 github.com/elastic/bayeux v1.0.5 - github.com/elastic/elastic-agent-autodiscover v0.6.5 + github.com/elastic/elastic-agent-autodiscover v0.6.6 github.com/elastic/elastic-agent-libs v0.7.2 github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 github.com/elastic/elastic-agent-system-metrics v0.9.1 @@ -405,7 +405,6 @@ replace ( github.com/docker/go-plugins-helpers => github.com/elastic/go-plugins-helpers v0.0.0-20200207104224-bdf17607b79f github.com/dop251/goja => github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 github.com/dop251/goja_nodejs => github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6 - github.com/fsnotify/fsevents => github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 github.com/fsnotify/fsnotify => github.com/adriansr/fsnotify v1.4.8-0.20211018144411-a81f2b630e7c github.com/godror/godror => github.com/godror/godror v0.33.2 // updating to v0.24.2 caused a breaking change @@ -415,7 +414,6 @@ replace ( github.com/snowflakedb/gosnowflake => github.com/snowflakedb/gosnowflake v1.6.19 github.com/tonistiigi/fifo => github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c k8s.io/kubernetes v1.13.0 => 
k8s.io/kubernetes v1.24.15 - ) // Exclude this version because the version has an invalid checksum. diff --git a/go.sum b/go.sum index 5b4d63c39652..c83841bdcb59 100644 --- a/go.sum +++ b/go.sum @@ -652,8 +652,8 @@ github.com/elastic/bayeux v1.0.5 h1:UceFq01ipmT3S8DzFK+uVAkbCdiPR0Bqei8qIGmUeY0= github.com/elastic/bayeux v1.0.5/go.mod h1:CSI4iP7qeo5MMlkznGvYKftp8M7qqP/3nzmVZoXHY68= github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3 h1:lnDkqiRFKm0rxdljqrj3lotWinO9+jFmeDXIC4gvIQs= github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3/go.mod h1:aPqzac6AYkipvp4hufTyMj5PDIphF3+At8zr7r51xjY= -github.com/elastic/elastic-agent-autodiscover v0.6.5 h1:5DeMpuNc8c/tN6HN0A4A2uOFTNFHSg7xrKApzfhvF1U= -github.com/elastic/elastic-agent-autodiscover v0.6.5/go.mod h1:chulyCAyZb/njMHgzkhC/yWnt8v/Y6eCRUhmFVnsA5o= +github.com/elastic/elastic-agent-autodiscover v0.6.6 h1:P1y0dDpbhJc7Uw/xe85irPEad4Vljygc+y4iSxtqW7A= +github.com/elastic/elastic-agent-autodiscover v0.6.6/go.mod h1:chulyCAyZb/njMHgzkhC/yWnt8v/Y6eCRUhmFVnsA5o= github.com/elastic/elastic-agent-client/v7 v7.6.0 h1:FEn6FjzynW4TIQo5G096Tr7xYK/P5LY9cSS6wRbXZTc= github.com/elastic/elastic-agent-client/v7 v7.6.0/go.mod h1:GlUKrbVd/O1CRAZonpBeN3J0RlVqP6VGcrBjFWca+aM= github.com/elastic/elastic-agent-libs v0.7.2 h1:yT0hF0UAxJCdQqhHh6SFpgYrcpB10oFzPj8IaytPS2o= diff --git a/libbeat/processors/add_kubernetes_metadata/config.go b/libbeat/processors/add_kubernetes_metadata/config.go index 0998a275ea4f..7c74c82268d1 100644 --- a/libbeat/processors/add_kubernetes_metadata/config.go +++ b/libbeat/processors/add_kubernetes_metadata/config.go @@ -50,15 +50,13 @@ type Enabled struct { type PluginConfig []map[string]config.C -func defaultKubernetesAnnotatorConfig() kubeAnnotatorConfig { - return kubeAnnotatorConfig{ - SyncPeriod: 10 * time.Minute, - CleanupTimeout: 60 * time.Second, - DefaultMatchers: Enabled{true}, - DefaultIndexers: Enabled{true}, - Scope: "node", - AddResourceMetadata: metadata.GetDefaultResourceMetadataConfig(), - 
} +func (k *kubeAnnotatorConfig) InitDefaults() { + k.SyncPeriod = 10 * time.Minute + k.CleanupTimeout = 60 * time.Second + k.DefaultMatchers = Enabled{true} + k.DefaultIndexers = Enabled{true} + k.Scope = "node" + k.AddResourceMetadata = metadata.GetDefaultResourceMetadataConfig() } func (k *kubeAnnotatorConfig) Validate() error { @@ -83,7 +81,7 @@ func (k *kubeAnnotatorConfig) Validate() error { err := matcherCfg.Unpack(&logsPathMatcher) if err != nil { - return fmt.Errorf("fail to unpack the `logs_path` matcher configuration: %s", err) + return fmt.Errorf("fail to unpack the `logs_path` matcher configuration: %w", err) } if logsPathMatcher.LogsPath == "" { return fmt.Errorf("invalid logs_path matcher configuration: when resource_type is defined, logs_path must be set as well") diff --git a/libbeat/processors/add_kubernetes_metadata/config_test.go b/libbeat/processors/add_kubernetes_metadata/config_test.go index e94089f388a9..3857eb148fa6 100644 --- a/libbeat/processors/add_kubernetes_metadata/config_test.go +++ b/libbeat/processors/add_kubernetes_metadata/config_test.go @@ -50,7 +50,7 @@ func TestConfigValidate(t *testing.T) { for _, test := range tests { cfg := config.MustNewConfigFrom(test.cfg) - c := defaultKubernetesAnnotatorConfig() + var c kubeAnnotatorConfig err := cfg.Unpack(&c) if test.error { @@ -116,16 +116,16 @@ func TestConfigValidate_LogsPatchMatcher(t *testing.T) { for _, test := range tests { cfg, _ := config.NewConfigFrom(test.matcherConfig) - c := defaultKubernetesAnnotatorConfig() - c.DefaultMatchers = Enabled{false} + var c kubeAnnotatorConfig - err := cfg.Unpack(&c) + _ = cfg.Unpack(&c) + c.DefaultMatchers = Enabled{false} c.Matchers = PluginConfig{ { test.matcherName: *cfg, }, } - err = c.Validate() + err := c.Validate() if test.error { require.NotNil(t, err) } else { diff --git a/libbeat/processors/add_kubernetes_metadata/kubernetes.go b/libbeat/processors/add_kubernetes_metadata/kubernetes.go index a8667aef0a8a..954a59ab3f12 100644 --- 
a/libbeat/processors/add_kubernetes_metadata/kubernetes.go +++ b/libbeat/processors/add_kubernetes_metadata/kubernetes.go @@ -123,8 +123,7 @@ func New(cfg *config.C) (beat.Processor, error) { } func newProcessorConfig(cfg *config.C, register *Register) (kubeAnnotatorConfig, error) { - config := defaultKubernetesAnnotatorConfig() - + var config kubeAnnotatorConfig err := cfg.Unpack(&config) if err != nil { return config, fmt.Errorf("fail to unpack the kubernetes configuration: %w", err) diff --git a/metricbeat/docs/modules/kubernetes.asciidoc b/metricbeat/docs/modules/kubernetes.asciidoc index 6b0bbc023f3c..9ff079faa3be 100644 --- a/metricbeat/docs/modules/kubernetes.asciidoc +++ b/metricbeat/docs/modules/kubernetes.asciidoc @@ -232,11 +232,18 @@ metricbeat.modules: # Enriching parameters: add_metadata: true - # When used outside the cluster: - #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config + #include_labels: [] + #exclude_labels: [] + #include_annotations: [] + #labels.dedot: true + #annotations.dedot: true + + # When used outside the cluster: + #node: node_name + # To configure additionally node and namespace metadata `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. 
# add_resource_metadata: @@ -276,13 +283,21 @@ metricbeat.modules: # Enriching parameters: add_metadata: true - # When used outside the cluster: - #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config + #include_labels: [] + #exclude_labels: [] + #include_annotations: [] + #labels.dedot: true + #annotations.dedot: true + + # When used outside the cluster: + #node: node_name + # Set the namespace to watch for resources #namespace: staging + # To configure additionally node and namespace metadata `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. # add_resource_metadata: diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index 3588aaec9769..d6b8b9e9475d 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -504,11 +504,18 @@ metricbeat.modules: # Enriching parameters: add_metadata: true - # When used outside the cluster: - #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config + #include_labels: [] + #exclude_labels: [] + #include_annotations: [] + #labels.dedot: true + #annotations.dedot: true + + # When used outside the cluster: + #node: node_name + # To configure additionally node and namespace metadata `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. 
# add_resource_metadata: @@ -548,13 +555,21 @@ metricbeat.modules: # Enriching parameters: add_metadata: true - # When used outside the cluster: - #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config + #include_labels: [] + #exclude_labels: [] + #include_annotations: [] + #labels.dedot: true + #annotations.dedot: true + + # When used outside the cluster: + #node: node_name + # Set the namespace to watch for resources #namespace: staging + # To configure additionally node and namespace metadata `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. # add_resource_metadata: diff --git a/metricbeat/module/kubernetes/_meta/config.reference.yml b/metricbeat/module/kubernetes/_meta/config.reference.yml index dcd59309119d..23f5ce8dea62 100644 --- a/metricbeat/module/kubernetes/_meta/config.reference.yml +++ b/metricbeat/module/kubernetes/_meta/config.reference.yml @@ -18,11 +18,18 @@ # Enriching parameters: add_metadata: true - # When used outside the cluster: - #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config + #include_labels: [] + #exclude_labels: [] + #include_annotations: [] + #labels.dedot: true + #annotations.dedot: true + + # When used outside the cluster: + #node: node_name + # To configure additionally node and namespace metadata `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. 
# add_resource_metadata: @@ -62,13 +69,21 @@ # Enriching parameters: add_metadata: true - # When used outside the cluster: - #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config + #include_labels: [] + #exclude_labels: [] + #include_annotations: [] + #labels.dedot: true + #annotations.dedot: true + + # When used outside the cluster: + #node: node_name + # Set the namespace to watch for resources #namespace: staging + # To configure additionally node and namespace metadata `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. # add_resource_metadata: diff --git a/metricbeat/module/kubernetes/_meta/config.yml b/metricbeat/module/kubernetes/_meta/config.yml index 44ef19c97862..1c56e57b167f 100644 --- a/metricbeat/module/kubernetes/_meta/config.yml +++ b/metricbeat/module/kubernetes/_meta/config.yml @@ -16,15 +16,21 @@ # Enriching parameters: #add_metadata: true + # If kube_config is not set, KUBECONFIG environment variable will be checked + # and if not present it will fall back to InCluster + #kube_config: ~/.kube/config + #include_labels: [] + #exclude_labels: [] + #include_annotations: [] #labels.dedot: true #annotations.dedot: true + # When used outside the cluster: #node: node_name - # If kube_config is not set, KUBECONFIG environment variable will be checked - # and if not present it will fall back to InCluster - #kube_config: ~/.kube/config + # Set the namespace to watch for resources #namespace: staging + # To configure additionally node and namespace metadata `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. 
# add_resource_metadata: diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 26728fccdaea..60b3360ab891 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -161,7 +161,7 @@ func NewResourceMetadataEnricher( return &nilEnricher{} } - // GetPodMetaGen requires cfg of type Config + // commonMetaConfig stores the metadata configuration of the resource itself commonMetaConfig := metadata.Config{} if err := base.Module().UnpackConfig(&commonMetaConfig); err != nil { logp.Err("Error initializing Kubernetes metadata enricher: %s", err) @@ -206,7 +206,7 @@ func NewResourceMetadataEnricher( // update func(m map[string]mapstr.M, r kubernetes.Resource) { accessor, _ := meta.Accessor(r) - id := join(accessor.GetNamespace(), accessor.GetName()) //nolint:all + id := join(accessor.GetNamespace(), accessor.GetName()) switch r := r.(type) { case *kubernetes.Pod: @@ -308,6 +308,14 @@ func NewContainerMetadataEnricher( return &nilEnricher{} } + // commonMetaConfig stores the metadata configuration of the resource itself + commonMetaConfig := metadata.Config{} + if err := base.Module().UnpackConfig(&commonMetaConfig); err != nil { + logp.Err("Error initializing Kubernetes metadata enricher: %s", err) + return &nilEnricher{} + } + cfg, _ := conf.NewConfigFrom(&commonMetaConfig) + // Resource is Pod so we need to create watchers for Replicasets and Jobs that it might belongs to // in order to be able to retrieve 2nd layer Owner metadata like in case of: // Deployment -> Replicaset -> Pod @@ -331,13 +339,6 @@ func NewContainerMetadataEnricher( } } - commonMetaConfig := metadata.Config{} - if err := base.Module().UnpackConfig(&commonMetaConfig); err != nil { - logp.Err("Error initializing Kubernetes metadata enricher: %s", err) - return &nilEnricher{} - } - cfg, _ := conf.NewConfigFrom(&commonMetaConfig) - metaGen := metadata.GetPodMetaGen(cfg, watcher, 
nodeWatcher, namespaceWatcher, replicaSetWatcher, jobWatcher, config.AddResourceMetadata) enricher := buildMetadataEnricher(watcher, nodeWatcher, namespaceWatcher, replicaSetWatcher, jobWatcher, @@ -508,6 +509,7 @@ func GetConfig(base mb.BaseMetricSet) (*kubernetesConfig, error) { SyncPeriod: time.Minute * 10, AddResourceMetadata: metadata.GetDefaultResourceMetadataConfig(), } + if err := base.Module().UnpackConfig(&config); err != nil { return nil, errors.New("error unpacking configs") } diff --git a/metricbeat/modules.d/kubernetes.yml.disabled b/metricbeat/modules.d/kubernetes.yml.disabled index 23bd210a8357..12bbeee26ca5 100644 --- a/metricbeat/modules.d/kubernetes.yml.disabled +++ b/metricbeat/modules.d/kubernetes.yml.disabled @@ -19,15 +19,21 @@ # Enriching parameters: #add_metadata: true + # If kube_config is not set, KUBECONFIG environment variable will be checked + # and if not present it will fall back to InCluster + #kube_config: ~/.kube/config + #include_labels: [] + #exclude_labels: [] + #include_annotations: [] #labels.dedot: true #annotations.dedot: true + # When used outside the cluster: #node: node_name - # If kube_config is not set, KUBECONFIG environment variable will be checked - # and if not present it will fall back to InCluster - #kube_config: ~/.kube/config + # Set the namespace to watch for resources #namespace: staging + # To configure additionally node and namespace metadata `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. 
# add_resource_metadata: diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index 46e951ccb56e..a22db4f7f8cf 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -901,11 +901,18 @@ metricbeat.modules: # Enriching parameters: add_metadata: true - # When used outside the cluster: - #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config + #include_labels: [] + #exclude_labels: [] + #include_annotations: [] + #labels.dedot: true + #annotations.dedot: true + + # When used outside the cluster: + #node: node_name + # To configure additionally node and namespace metadata `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. # add_resource_metadata: @@ -945,13 +952,21 @@ metricbeat.modules: # Enriching parameters: add_metadata: true - # When used outside the cluster: - #node: node_name # If kube_config is not set, KUBECONFIG environment variable will be checked # and if not present it will fall back to InCluster #kube_config: ~/.kube/config + #include_labels: [] + #exclude_labels: [] + #include_annotations: [] + #labels.dedot: true + #annotations.dedot: true + + # When used outside the cluster: + #node: node_name + # Set the namespace to watch for resources #namespace: staging + # To configure additionally node and namespace metadata `add_resource_metadata` can be defined. # By default all labels will be included while annotations are not added by default. 
# add_resource_metadata: From 204f37c250bc180930e2eca1683a862df592bc98 Mon Sep 17 00:00:00 2001 From: Gustavo B Date: Tue, 2 Jan 2024 13:56:35 -0300 Subject: [PATCH 018/129] Winlogbeat/Windows Services: changed default keystore path to program files (#37237) * added keystore.path config to Windows service --------- Co-authored-by: Gustavo Freddo Breunig Co-authored-by: Lee E Hinman <57081003+leehinman@users.noreply.github.com> --- CHANGELOG.next.asciidoc | 1 + dev-tools/packaging/templates/windows/install-service.ps1.tmpl | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 284ebac1eeb2..c3ff88d0c8ac 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -32,6 +32,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Winlogbeat* - Add "event.category" and "event.type" to Sysmon module for EventIDs 8, 9, 19, 20, 27, 28, 255 {pull}35193[35193] +- Add "keystore.path" configuration settings to $workdir\data\{{.BeatName}}.keystore. Issue {issue}12315[12315] {pull}37237[37237] *Functionbeat* diff --git a/dev-tools/packaging/templates/windows/install-service.ps1.tmpl b/dev-tools/packaging/templates/windows/install-service.ps1.tmpl index 840f8dd97d3e..279862dcd1de 100644 --- a/dev-tools/packaging/templates/windows/install-service.ps1.tmpl +++ b/dev-tools/packaging/templates/windows/install-service.ps1.tmpl @@ -11,7 +11,7 @@ $workdir = Split-Path $MyInvocation.MyCommand.Path # Create the new service. 
New-Service -name {{.BeatName}} ` -displayName {{.BeatName | title}} ` - -binaryPathName "`"$workdir\{{.BeatName}}.exe`" --environment=windows_service -c `"$workdir\{{.BeatName}}.yml`" --path.home `"$workdir`" --path.data `"$env:PROGRAMDATA\{{.BeatName}}`" --path.logs `"$env:PROGRAMDATA\{{.BeatName}}\logs`" -E logging.files.redirect_stderr=true" + -binaryPathName "`"$workdir\{{.BeatName}}.exe`" --environment=windows_service -c `"$workdir\{{.BeatName}}.yml`" --path.home `"$workdir`" --path.data `"$env:PROGRAMDATA\{{.BeatName}}`" --path.logs `"$env:PROGRAMDATA\{{.BeatName}}\logs`" -E keystore.path=`"$workdir\data\{{.BeatName}}.keystore`" -E logging.files.redirect_stderr=true" # Attempt to set the service to delayed start using sc config. Try { From 6f192c01ef1932b175e7309ce8b93a6e0b56b854 Mon Sep 17 00:00:00 2001 From: Craig MacKenzie Date: Tue, 2 Jan 2024 14:25:05 -0500 Subject: [PATCH 019/129] Update notice year to 2024. (#37532) --- NOTICE.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/NOTICE.txt b/NOTICE.txt index 6e60972d6745..192819bb6e72 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,5 +1,5 @@ Elastic Beats -Copyright 2014-2023 Elasticsearch BV +Copyright 2014-2024 Elasticsearch BV This product includes software developed by The Apache Software Foundation (http://www.apache.org/). 
From c9f1426f12d9f9cdfb317c747118e2744abdccd7 Mon Sep 17 00:00:00 2001 From: Alex K <8418476+fearful-symmetry@users.noreply.github.com> Date: Wed, 3 Jan 2024 10:26:07 -0800 Subject: [PATCH 020/129] Add histogram and output metrics for output latency (#37445) * add metrics for output latency * plz linter * use milliseconds * move around timekeeping statements * move logstash latency window --- libbeat/docs/metrics-in-logs.asciidoc | 1 + libbeat/outputs/elasticsearch/client.go | 8 +++- libbeat/outputs/fileout/file.go | 6 +++ libbeat/outputs/logstash/sync.go | 6 ++- libbeat/outputs/metrics.go | 21 +++++++++- libbeat/outputs/observer.go | 52 +++++++++++++------------ libbeat/outputs/redis/client.go | 5 ++- libbeat/outputs/shipper/shipper_test.go | 25 ++++++------ 8 files changed, 82 insertions(+), 42 deletions(-) diff --git a/libbeat/docs/metrics-in-logs.asciidoc b/libbeat/docs/metrics-in-logs.asciidoc index c499e7462f4d..97aac4f3a302 100644 --- a/libbeat/docs/metrics-in-logs.asciidoc +++ b/libbeat/docs/metrics-in-logs.asciidoc @@ -170,6 +170,7 @@ endif::[] | `.output.events.total` | Integer | Number of events currently being processed by the output. | If this number grows over time, it may indicate that the output destination (e.g. {ls} pipeline or {es} cluster) is not able to accept events at the same or faster rate than what {beatname_uc} is sending to it. | `.output.events.acked` | Integer | Number of events acknowledged by the output destination. | Generally, we want this number to be the same as `.output.events.total` as this indicates that the output destination has reliably received all the events sent to it. | `.output.events.failed` | Integer | Number of events that {beatname_uc} tried to send to the output destination, but the destination failed to receive them. | Generally, we want this field to be absent or its value to be zero. 
When the value is greater than zero, it's useful to check {beatname_uc}'s logs right before this log entry's `@timestamp` to see if there are any connectivity issues with the output destination. Note that failed events are not lost or dropped; they will be sent back to the publisher pipeline for retrying later. +| `.output.write.latency` | Object | Reports statistics on the time to send an event to the connected output, in milliseconds. This can be used to diagnose delays and performance issues caused by I/O or output configuration. This metric is available for the Elasticsearch, file, redis, and logstash outputs. |=== ifeval::["{beatname_lc}"=="filebeat"] diff --git a/libbeat/outputs/elasticsearch/client.go b/libbeat/outputs/elasticsearch/client.go index 4996dba887e2..8aeef2c623e7 100644 --- a/libbeat/outputs/elasticsearch/client.go +++ b/libbeat/outputs/elasticsearch/client.go @@ -221,7 +221,7 @@ func (client *Client) Publish(ctx context.Context, batch publisher.Batch) error func (client *Client) publishEvents(ctx context.Context, data []publisher.Event) ([]publisher.Event, error) { span, ctx := apm.StartSpan(ctx, "publishEvents", "output") defer span.End() - begin := time.Now() + st := client.observer if st != nil { @@ -246,8 +246,10 @@ func (client *Client) publishEvents(ctx context.Context, data []publisher.Event) return nil, nil } + begin := time.Now() params := map[string]string{"filter_path": "errors,items.*.error,items.*.status"} status, result, sendErr := client.conn.Bulk(ctx, "", "", params, bulkItems) + timeSinceSend := time.Since(begin) if sendErr != nil { if status == http.StatusRequestEntityTooLarge { @@ -265,7 +267,7 @@ func (client *Client) publishEvents(ctx context.Context, data []publisher.Event) client.log.Debugf("PublishEvents: %d events have been published to elasticsearch in %v.", pubCount, - time.Since(begin)) + timeSinceSend) // check response for transient errors var failedEvents []publisher.Event @@ -289,6 +291,8 @@ func (client *Client) 
publishEvents(ctx context.Context, data []publisher.Event) st.Dropped(dropped) st.Duplicate(duplicates) st.ErrTooMany(stats.tooMany) + st.ReportLatency(timeSinceSend) + } if failed > 0 { diff --git a/libbeat/outputs/fileout/file.go b/libbeat/outputs/fileout/file.go index d12a11b25c3c..4ddc5955d6ef 100644 --- a/libbeat/outputs/fileout/file.go +++ b/libbeat/outputs/fileout/file.go @@ -21,6 +21,7 @@ import ( "context" "os" "path/filepath" + "time" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/outputs" @@ -119,6 +120,7 @@ func (out *fileOutput) Publish(_ context.Context, batch publisher.Batch) error { st.NewBatch(len(events)) dropped := 0 + for i := range events { event := &events[i] @@ -135,6 +137,7 @@ func (out *fileOutput) Publish(_ context.Context, batch publisher.Batch) error { continue } + begin := time.Now() if _, err = out.rotator.Write(append(serializedEvent, '\n')); err != nil { st.WriteError(err) @@ -149,9 +152,12 @@ func (out *fileOutput) Publish(_ context.Context, batch publisher.Batch) error { } st.WriteBytes(len(serializedEvent) + 1) + took := time.Since(begin) + st.ReportLatency(took) } st.Dropped(dropped) + st.Acked(len(events) - dropped) return nil diff --git a/libbeat/outputs/logstash/sync.go b/libbeat/outputs/logstash/sync.go index ad4293eb9f79..2a49324c46f9 100644 --- a/libbeat/outputs/logstash/sync.go +++ b/libbeat/outputs/logstash/sync.go @@ -114,6 +114,7 @@ func (c *syncClient) Publish(_ context.Context, batch publisher.Batch) error { } for len(events) > 0 { + // check if we need to reconnect if c.ticker != nil { select { @@ -136,12 +137,14 @@ func (c *syncClient) Publish(_ context.Context, batch publisher.Batch) error { err error ) + begin := time.Now() if c.win == nil { n, err = c.sendEvents(events) } else { n, err = c.publishWindowed(events) } - + took := time.Since(begin) + st.ReportLatency(took) c.log.Debugf("%v events out of %v events sent to logstash host %s. 
Continue sending", n, len(events), c.Host()) @@ -163,6 +166,7 @@ func (c *syncClient) Publish(_ context.Context, batch publisher.Batch) error { return err } + } batch.ACK() diff --git a/libbeat/outputs/metrics.go b/libbeat/outputs/metrics.go index 77374df3e61e..5502c4e4ae06 100644 --- a/libbeat/outputs/metrics.go +++ b/libbeat/outputs/metrics.go @@ -17,7 +17,14 @@ package outputs -import "github.com/elastic/elastic-agent-libs/monitoring" +import ( + "time" + + "github.com/rcrowley/go-metrics" + + "github.com/elastic/elastic-agent-libs/monitoring" + "github.com/elastic/elastic-agent-libs/monitoring/adapter" +) // Stats implements the Observer interface, for collecting metrics on common // outputs events. @@ -46,13 +53,15 @@ type Stats struct { readBytes *monitoring.Uint // total amount of bytes read readErrors *monitoring.Uint // total number of errors while waiting for response on output + + sendLatencyMillis metrics.Sample } // NewStats creates a new Stats instance using a backing monitoring registry. // This function will create and register a number of metrics with the registry passed. // The registry must not be null. func NewStats(reg *monitoring.Registry) *Stats { - return &Stats{ + obj := &Stats{ batches: monitoring.NewUint(reg, "events.batches"), events: monitoring.NewUint(reg, "events.total"), acked: monitoring.NewUint(reg, "events.acked"), @@ -69,7 +78,11 @@ func NewStats(reg *monitoring.Registry) *Stats { readBytes: monitoring.NewUint(reg, "read.bytes"), readErrors: monitoring.NewUint(reg, "read.errors"), + + sendLatencyMillis: metrics.NewUniformSample(1024), } + _ = adapter.NewGoMetrics(reg, "write.latency", adapter.Accept).Register("histogram", metrics.NewHistogram(obj.sendLatencyMillis)) + return obj } // NewBatch updates active batch and event metrics. 
@@ -81,6 +94,10 @@ func (s *Stats) NewBatch(n int) { } } +func (s *Stats) ReportLatency(time time.Duration) { + s.sendLatencyMillis.Update(time.Milliseconds()) +} + // Acked updates active and acked event metrics. func (s *Stats) Acked(n int) { if s != nil { diff --git a/libbeat/outputs/observer.go b/libbeat/outputs/observer.go index 9d7a3aec4a0c..3a330e4a43ac 100644 --- a/libbeat/outputs/observer.go +++ b/libbeat/outputs/observer.go @@ -17,21 +17,24 @@ package outputs +import "time" + // Observer provides an interface used by outputs to report common events on // documents/events being published and I/O workload. type Observer interface { - NewBatch(int) // report new batch being processed with number of events - Acked(int) // report number of acked events - Failed(int) // report number of failed events - Dropped(int) // report number of dropped events - Duplicate(int) // report number of events detected as duplicates (e.g. on resends) - Cancelled(int) // report number of cancelled events - Split() // report a batch was split for being too large to ingest - WriteError(error) // report an I/O error on write - WriteBytes(int) // report number of bytes being written - ReadError(error) // report an I/O error on read - ReadBytes(int) // report number of bytes being read - ErrTooMany(int) // report too many requests response + NewBatch(int) // report new batch being processed with number of events + ReportLatency(time.Duration) // report the duration a send to the output takes + Acked(int) // report number of acked events + Failed(int) // report number of failed events + Dropped(int) // report number of dropped events + Duplicate(int) // report number of events detected as duplicates (e.g. 
on resends) + Cancelled(int) // report number of cancelled events + Split() // report a batch was split for being too large to ingest + WriteError(error) // report an I/O error on write + WriteBytes(int) // report number of bytes being written + ReadError(error) // report an I/O error on read + ReadBytes(int) // report number of bytes being read + ErrTooMany(int) // report too many requests response } type emptyObserver struct{} @@ -43,15 +46,16 @@ func NewNilObserver() Observer { return nilObserver } -func (*emptyObserver) NewBatch(int) {} -func (*emptyObserver) Acked(int) {} -func (*emptyObserver) Duplicate(int) {} -func (*emptyObserver) Failed(int) {} -func (*emptyObserver) Dropped(int) {} -func (*emptyObserver) Cancelled(int) {} -func (*emptyObserver) Split() {} -func (*emptyObserver) WriteError(error) {} -func (*emptyObserver) WriteBytes(int) {} -func (*emptyObserver) ReadError(error) {} -func (*emptyObserver) ReadBytes(int) {} -func (*emptyObserver) ErrTooMany(int) {} +func (*emptyObserver) NewBatch(int) {} +func (*emptyObserver) ReportLatency(_ time.Duration) {} +func (*emptyObserver) Acked(int) {} +func (*emptyObserver) Duplicate(int) {} +func (*emptyObserver) Failed(int) {} +func (*emptyObserver) Dropped(int) {} +func (*emptyObserver) Cancelled(int) {} +func (*emptyObserver) Split() {} +func (*emptyObserver) WriteError(error) {} +func (*emptyObserver) WriteBytes(int) {} +func (*emptyObserver) ReadError(error) {} +func (*emptyObserver) ReadBytes(int) {} +func (*emptyObserver) ErrTooMany(int) {} diff --git a/libbeat/outputs/redis/client.go b/libbeat/outputs/redis/client.go index 5165d894f654..5a299749aac8 100644 --- a/libbeat/outputs/redis/client.go +++ b/libbeat/outputs/redis/client.go @@ -233,8 +233,11 @@ func (c *client) publishEventsBulk(conn redis.Conn, command string) publishFn { return nil, nil } + start := time.Now() // RPUSH returns total length of list -> fail and retry all on error _, err := conn.Do(command, args...) 
+ took := time.Since(start) + c.observer.ReportLatency(took) if err != nil { c.log.Errorf("Failed to %v to redis list with: %+v", command, err) return okEvents, err @@ -283,7 +286,7 @@ func (c *client) publishEventsPipeline(conn redis.Conn, command string) publishF for i := range serialized { _, err := conn.Receive() if err != nil { - if _, ok := err.(redis.Error); ok { + if _, ok := err.(redis.Error); ok { //nolint:errorlint //this line checks against a type, not an instance of an error c.log.Errorf("Failed to %v event to list with %+v", command, err) failed = append(failed, data[i]) diff --git a/libbeat/outputs/shipper/shipper_test.go b/libbeat/outputs/shipper/shipper_test.go index ef6b628ba8c9..e26d44635aff 100644 --- a/libbeat/outputs/shipper/shipper_test.go +++ b/libbeat/outputs/shipper/shipper_test.go @@ -637,15 +637,16 @@ type TestObserver struct { errTooMany int } -func (to *TestObserver) NewBatch(batch int) { to.batch += batch } -func (to *TestObserver) Acked(acked int) { to.acked += acked } -func (to *TestObserver) Duplicate(duplicate int) { to.duplicate += duplicate } -func (to *TestObserver) Failed(failed int) { to.failed += failed } -func (to *TestObserver) Dropped(dropped int) { to.dropped += dropped } -func (to *TestObserver) Cancelled(cancelled int) { to.cancelled += cancelled } -func (to *TestObserver) Split() { to.split++ } -func (to *TestObserver) WriteError(we error) { to.writeError = we } -func (to *TestObserver) WriteBytes(wb int) { to.writeBytes += wb } -func (to *TestObserver) ReadError(re error) { to.readError = re } -func (to *TestObserver) ReadBytes(rb int) { to.readBytes += rb } -func (to *TestObserver) ErrTooMany(err int) { to.errTooMany = +err } +func (to *TestObserver) NewBatch(batch int) { to.batch += batch } +func (to *TestObserver) Acked(acked int) { to.acked += acked } +func (to *TestObserver) ReportLatency(_ time.Duration) {} +func (to *TestObserver) Duplicate(duplicate int) { to.duplicate += duplicate } +func (to *TestObserver) 
Failed(failed int) { to.failed += failed } +func (to *TestObserver) Dropped(dropped int) { to.dropped += dropped } +func (to *TestObserver) Cancelled(cancelled int) { to.cancelled += cancelled } +func (to *TestObserver) Split() { to.split++ } +func (to *TestObserver) WriteError(we error) { to.writeError = we } +func (to *TestObserver) WriteBytes(wb int) { to.writeBytes += wb } +func (to *TestObserver) ReadError(re error) { to.readError = re } +func (to *TestObserver) ReadBytes(rb int) { to.readBytes += rb } +func (to *TestObserver) ErrTooMany(err int) { to.errTooMany = +err } From 81ebafc4c2da79f1c6539c6efe2f09230ae91e65 Mon Sep 17 00:00:00 2001 From: Lee E Hinman <57081003+leehinman@users.noreply.github.com> Date: Thu, 4 Jan 2024 08:39:46 -0600 Subject: [PATCH 021/129] update cisco/asa and cyberarkpas/audit golden files (#37540) The source logs don't contain a year, so these need to be refreshed. --- .../additional_messages.log-expected.json | 36 +++++++++---------- .../asa/test/non-canonical.log-expected.json | 8 ++--- ...lear_users_history_start.log-expected.json | 2 +- ..._clear_users_history_end.log-expected.json | 2 +- ...tor_dr_replication_start.log-expected.json | 2 +- ...nitor_dr_replication_end.log-expected.json | 2 +- ...7_monitor_fw_rules_start.log-expected.json | 2 +- ...358_monitor_fw_rules_end.log-expected.json | 2 +- ...ault_certificate_is_sha1.log-expected.json | 2 +- .../59_clear_safe_history.log-expected.json | 2 +- .../test/88_set_password.log-expected.json | 2 +- .../audit/test/legacysyslog.log-expected.json | 2 +- 12 files changed, 32 insertions(+), 32 deletions(-) diff --git a/x-pack/filebeat/module/cisco/asa/test/additional_messages.log-expected.json b/x-pack/filebeat/module/cisco/asa/test/additional_messages.log-expected.json index 907a08003b42..256cb7f997bf 100644 --- a/x-pack/filebeat/module/cisco/asa/test/additional_messages.log-expected.json +++ b/x-pack/filebeat/module/cisco/asa/test/additional_messages.log-expected.json @@ -181,12 
+181,12 @@ "event.code": 609002, "event.dataset": "cisco.asa", "event.duration": 0, - "event.end": "2023-05-05T17:51:17.000-02:00", + "event.end": "2024-05-05T17:51:17.000-02:00", "event.kind": "event", "event.module": "cisco", "event.original": "%FTD-7-609002: Teardown local-host net:192.168.2.2 duration 0:00:00", "event.severity": 7, - "event.start": "2023-05-05T19:51:17.000Z", + "event.start": "2024-05-05T19:51:17.000Z", "event.timezone": "-02:00", "event.type": [ "connection", @@ -701,12 +701,12 @@ "event.code": 609002, "event.dataset": "cisco.asa", "event.duration": 0, - "event.end": "2023-05-05T18:24:31.000-02:00", + "event.end": "2024-05-05T18:24:31.000-02:00", "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-7-609002: Teardown local-host identity:10.10.10.10 duration 0:00:00", "event.severity": 7, - "event.start": "2023-05-05T20:24:31.000Z", + "event.start": "2024-05-05T20:24:31.000Z", "event.timezone": "-02:00", "event.type": [ "connection", @@ -849,13 +849,13 @@ "event.code": 302014, "event.dataset": "cisco.asa", "event.duration": 0, - "event.end": "2023-05-05T18:29:32.000-02:00", + "event.end": "2024-05-05T18:29:32.000-02:00", "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-6-302014: Teardown TCP connection 2960892904 for out111:10.10.10.10/443 to fw111:192.168.2.2/55225 duration 0:00:00 bytes 0 TCP Reset-I", "event.reason": "TCP Reset-I", "event.severity": 6, - "event.start": "2023-05-05T20:29:32.000Z", + "event.start": "2024-05-05T20:29:32.000Z", "event.timezone": "-02:00", "event.type": [ "connection", @@ -966,12 +966,12 @@ "event.code": 305012, "event.dataset": "cisco.asa", "event.duration": 0, - "event.end": "2023-05-05T18:29:32.000-02:00", + "event.end": "2024-05-05T18:29:32.000-02:00", "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-6-305012: Teardown dynamic UDP translation from fw111:10.10.10.10/54230 to out111:192.168.2.2/54230 duration 0:00:00", "event.severity": 6, - 
"event.start": "2023-05-05T20:29:32.000Z", + "event.start": "2024-05-05T20:29:32.000Z", "event.timezone": "-02:00", "event.type": [ "connection", @@ -1175,12 +1175,12 @@ "event.code": 302016, "event.dataset": "cisco.asa", "event.duration": 124000000000, - "event.end": "2023-05-05T18:40:50.000-02:00", + "event.end": "2024-05-05T18:40:50.000-02:00", "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-2-302016: Teardown UDP connection 1671727 for intfacename:10.10.10.10/161 to net:192.186.2.2/53356 duration 0:02:04 bytes 64585", "event.severity": 2, - "event.start": "2023-05-05T20:38:46.000Z", + "event.start": "2024-05-05T20:38:46.000Z", "event.timezone": "-02:00", "event.type": [ "connection", @@ -1812,13 +1812,13 @@ "event.code": 302023, "event.dataset": "cisco.asa", "event.duration": 0, - "event.end": "2023-05-05T19:02:58.000-02:00", + "event.end": "2024-05-05T19:02:58.000-02:00", "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-6-302023: Teardown stub TCP connection for fw111:10.10.10.10/39210 to net:192.168.2.2/10051 duration 0:00:00 forwarded bytes 0 Cluster flow with CLU closed on owner", "event.reason": "Cluster flow with CLU closed on owner", "event.severity": 6, - "event.start": "2023-05-05T21:02:58.000Z", + "event.start": "2024-05-05T21:02:58.000Z", "event.timezone": "-02:00", "event.type": [ "info" @@ -1868,13 +1868,13 @@ "event.code": 302023, "event.dataset": "cisco.asa", "event.duration": 0, - "event.end": "2023-05-05T19:02:58.000-02:00", + "event.end": "2024-05-05T19:02:58.000-02:00", "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-6-302023: Teardown stub TCP connection for net:10.10.10.10/10051 to unknown:192.168.2.2/39222 duration 0:00:00 forwarded bytes 0 Forwarding or redirect flow removed to create director or backup flow", "event.reason": "Forwarding or redirect flow removed to create director or backup flow", "event.severity": 6, - "event.start": "2023-05-05T21:02:58.000Z", + 
"event.start": "2024-05-05T21:02:58.000Z", "event.timezone": "-02:00", "event.type": [ "info" @@ -2687,13 +2687,13 @@ "event.code": 302304, "event.dataset": "cisco.asa", "event.duration": 3602000000000, - "event.end": "2023-04-27T04:12:23.000-02:00", + "event.end": "2024-04-27T04:12:23.000-02:00", "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-6-302304: Teardown TCP state-bypass connection 2751765169 from server.deflan:81.2.69.143/54242 to server.deflan:67.43.156.12/9101 duration 1:00:02 bytes 245 Connection timeout", "event.reason": "Connection timeout", "event.severity": 6, - "event.start": "2023-04-27T05:12:21.000Z", + "event.start": "2024-04-27T05:12:21.000Z", "event.timezone": "-02:00", "event.type": [ "connection", @@ -3227,13 +3227,13 @@ "event.code": 113019, "event.dataset": "cisco.asa", "event.duration": 1936000000000, - "event.end": "2023-04-27T02:03:03.000-02:00", + "event.end": "2024-04-27T02:03:03.000-02:00", "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-4-113019: Group = 81.2.69.143, Username = 81.2.69.143, IP = 81.2.69.143, Session disconnected. 
Session Type: LAN-to-LAN, Duration: 0h:32m:16s, Bytes xmt: 297103, Bytes rcv: 1216163, Reason: User Requested", "event.reason": "User Requested", "event.severity": 4, - "event.start": "2023-04-27T03:30:47.000Z", + "event.start": "2024-04-27T03:30:47.000Z", "event.timezone": "-02:00", "event.type": [ "info" diff --git a/x-pack/filebeat/module/cisco/asa/test/non-canonical.log-expected.json b/x-pack/filebeat/module/cisco/asa/test/non-canonical.log-expected.json index 63f46eabbba2..d7c455136e2f 100644 --- a/x-pack/filebeat/module/cisco/asa/test/non-canonical.log-expected.json +++ b/x-pack/filebeat/module/cisco/asa/test/non-canonical.log-expected.json @@ -361,12 +361,12 @@ "event.code": 305012, "event.dataset": "cisco.asa", "event.duration": 41000000000, - "event.end": "2023-07-15T13:38:47.000-02:00", + "event.end": "2024-07-15T13:38:47.000-02:00", "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-6-305012: Teardown dynamic UDP translation from SERVERS:exp-wait/62409 to outside:81.2.69.142/62409 duration 0:00:41", "event.severity": 6, - "event.start": "2023-07-15T15:38:06.000Z", + "event.start": "2024-07-15T15:38:06.000Z", "event.timezone": "-02:00", "event.type": [ "connection", @@ -423,12 +423,12 @@ "event.code": 305012, "event.dataset": "cisco.asa", "event.duration": 30000000000, - "event.end": "2023-07-15T13:37:33.000-02:00", + "event.end": "2024-07-15T13:37:33.000-02:00", "event.kind": "event", "event.module": "cisco", "event.original": "%ASA-6-305012: Teardown dynamic UDP translation from SERVERS:exp-wait/56421 to outside:81.2.69.142/56421 duration 0:00:30", "event.severity": 6, - "event.start": "2023-07-15T15:37:03.000Z", + "event.start": "2024-07-15T15:37:03.000Z", "event.timezone": "-02:00", "event.type": [ "connection", diff --git a/x-pack/filebeat/module/cyberarkpas/audit/test/288_auto_clear_users_history_start.log-expected.json b/x-pack/filebeat/module/cyberarkpas/audit/test/288_auto_clear_users_history_start.log-expected.json index 
129ad664676d..fb3cfbbb9cb5 100644 --- a/x-pack/filebeat/module/cyberarkpas/audit/test/288_auto_clear_users_history_start.log-expected.json +++ b/x-pack/filebeat/module/cyberarkpas/audit/test/288_auto_clear_users_history_start.log-expected.json @@ -38,7 +38,7 @@ ] }, { - "@timestamp": "2023-03-08T03:00:20.000-02:00", + "@timestamp": "2024-03-08T03:00:20.000-02:00", "cyberarkpas.audit.action": "Auto Clear Users History start", "cyberarkpas.audit.desc": "Auto Clear Users History start", "cyberarkpas.audit.issuer": "Batch", diff --git a/x-pack/filebeat/module/cyberarkpas/audit/test/289_auto_clear_users_history_end.log-expected.json b/x-pack/filebeat/module/cyberarkpas/audit/test/289_auto_clear_users_history_end.log-expected.json index de251078346b..9ad5b886c6ca 100644 --- a/x-pack/filebeat/module/cyberarkpas/audit/test/289_auto_clear_users_history_end.log-expected.json +++ b/x-pack/filebeat/module/cyberarkpas/audit/test/289_auto_clear_users_history_end.log-expected.json @@ -38,7 +38,7 @@ ] }, { - "@timestamp": "2023-03-08T03:00:20.000-02:00", + "@timestamp": "2024-03-08T03:00:20.000-02:00", "cyberarkpas.audit.action": "Auto Clear Users History end", "cyberarkpas.audit.desc": "Auto Clear Users History end", "cyberarkpas.audit.issuer": "Batch", diff --git a/x-pack/filebeat/module/cyberarkpas/audit/test/310_monitor_dr_replication_start.log-expected.json b/x-pack/filebeat/module/cyberarkpas/audit/test/310_monitor_dr_replication_start.log-expected.json index 41b565a5a4a0..9d813f639d65 100644 --- a/x-pack/filebeat/module/cyberarkpas/audit/test/310_monitor_dr_replication_start.log-expected.json +++ b/x-pack/filebeat/module/cyberarkpas/audit/test/310_monitor_dr_replication_start.log-expected.json @@ -38,7 +38,7 @@ ] }, { - "@timestamp": "2023-03-08T02:48:07.000-02:00", + "@timestamp": "2024-03-08T02:48:07.000-02:00", "cyberarkpas.audit.action": "Monitor DR Replication start", "cyberarkpas.audit.desc": "Monitor DR Replication start", "cyberarkpas.audit.issuer": "Batch", diff 
--git a/x-pack/filebeat/module/cyberarkpas/audit/test/311_monitor_dr_replication_end.log-expected.json b/x-pack/filebeat/module/cyberarkpas/audit/test/311_monitor_dr_replication_end.log-expected.json index 13cd9bf1248f..ee767935d3b0 100644 --- a/x-pack/filebeat/module/cyberarkpas/audit/test/311_monitor_dr_replication_end.log-expected.json +++ b/x-pack/filebeat/module/cyberarkpas/audit/test/311_monitor_dr_replication_end.log-expected.json @@ -38,7 +38,7 @@ ] }, { - "@timestamp": "2023-03-08T02:48:07.000-02:00", + "@timestamp": "2024-03-08T02:48:07.000-02:00", "cyberarkpas.audit.action": "Monitor DR Replication end", "cyberarkpas.audit.desc": "Monitor DR Replication end", "cyberarkpas.audit.issuer": "Batch", diff --git a/x-pack/filebeat/module/cyberarkpas/audit/test/357_monitor_fw_rules_start.log-expected.json b/x-pack/filebeat/module/cyberarkpas/audit/test/357_monitor_fw_rules_start.log-expected.json index 22738846d864..2943356268b9 100644 --- a/x-pack/filebeat/module/cyberarkpas/audit/test/357_monitor_fw_rules_start.log-expected.json +++ b/x-pack/filebeat/module/cyberarkpas/audit/test/357_monitor_fw_rules_start.log-expected.json @@ -38,7 +38,7 @@ ] }, { - "@timestamp": "2023-03-08T02:32:56.000-02:00", + "@timestamp": "2024-03-08T02:32:56.000-02:00", "cyberarkpas.audit.action": "Monitor FW rules start", "cyberarkpas.audit.desc": "Monitor FW rules start", "cyberarkpas.audit.issuer": "Batch", diff --git a/x-pack/filebeat/module/cyberarkpas/audit/test/358_monitor_fw_rules_end.log-expected.json b/x-pack/filebeat/module/cyberarkpas/audit/test/358_monitor_fw_rules_end.log-expected.json index 6518fbedab7f..bed2becb5d42 100644 --- a/x-pack/filebeat/module/cyberarkpas/audit/test/358_monitor_fw_rules_end.log-expected.json +++ b/x-pack/filebeat/module/cyberarkpas/audit/test/358_monitor_fw_rules_end.log-expected.json @@ -38,7 +38,7 @@ ] }, { - "@timestamp": "2023-03-08T02:32:56.000-02:00", + "@timestamp": "2024-03-08T02:32:56.000-02:00", "cyberarkpas.audit.action": "Monitor FW 
Rules end", "cyberarkpas.audit.desc": "Monitor FW Rules end", "cyberarkpas.audit.issuer": "Batch", diff --git a/x-pack/filebeat/module/cyberarkpas/audit/test/479_security_warning_the_signature_hash_algorithm_of_the_vault_certificate_is_sha1.log-expected.json b/x-pack/filebeat/module/cyberarkpas/audit/test/479_security_warning_the_signature_hash_algorithm_of_the_vault_certificate_is_sha1.log-expected.json index eafc4237e717..bb66629fa39b 100644 --- a/x-pack/filebeat/module/cyberarkpas/audit/test/479_security_warning_the_signature_hash_algorithm_of_the_vault_certificate_is_sha1.log-expected.json +++ b/x-pack/filebeat/module/cyberarkpas/audit/test/479_security_warning_the_signature_hash_algorithm_of_the_vault_certificate_is_sha1.log-expected.json @@ -39,7 +39,7 @@ ] }, { - "@timestamp": "2023-03-08T07:46:54.000-02:00", + "@timestamp": "2024-03-08T07:46:54.000-02:00", "cyberarkpas.audit.action": "Security warning - The Signature Hash Algorithm of the Vault certificate is SHA1.", "cyberarkpas.audit.desc": "Security warning - The Signature Hash Algorithm of the Vault certificate is SHA1.", "cyberarkpas.audit.issuer": "Builtin", diff --git a/x-pack/filebeat/module/cyberarkpas/audit/test/59_clear_safe_history.log-expected.json b/x-pack/filebeat/module/cyberarkpas/audit/test/59_clear_safe_history.log-expected.json index d3e8d85a46f5..ef8f8d42bb26 100644 --- a/x-pack/filebeat/module/cyberarkpas/audit/test/59_clear_safe_history.log-expected.json +++ b/x-pack/filebeat/module/cyberarkpas/audit/test/59_clear_safe_history.log-expected.json @@ -39,7 +39,7 @@ ] }, { - "@timestamp": "2023-03-08T03:10:31.000-02:00", + "@timestamp": "2024-03-08T03:10:31.000-02:00", "cyberarkpas.audit.action": "Clear Safe History", "cyberarkpas.audit.desc": "Clear Safe History", "cyberarkpas.audit.issuer": "PasswordManager", diff --git a/x-pack/filebeat/module/cyberarkpas/audit/test/88_set_password.log-expected.json b/x-pack/filebeat/module/cyberarkpas/audit/test/88_set_password.log-expected.json index 
728e84742ea2..65ec1710d275 100644 --- a/x-pack/filebeat/module/cyberarkpas/audit/test/88_set_password.log-expected.json +++ b/x-pack/filebeat/module/cyberarkpas/audit/test/88_set_password.log-expected.json @@ -76,7 +76,7 @@ ] }, { - "@timestamp": "2023-03-08T02:54:46.000-02:00", + "@timestamp": "2024-03-08T02:54:46.000-02:00", "cyberarkpas.audit.action": "Set Password", "cyberarkpas.audit.desc": "Set Password", "cyberarkpas.audit.issuer": "PVWAGWUser", diff --git a/x-pack/filebeat/module/cyberarkpas/audit/test/legacysyslog.log-expected.json b/x-pack/filebeat/module/cyberarkpas/audit/test/legacysyslog.log-expected.json index c4e72e65c5f0..439a5355e95b 100644 --- a/x-pack/filebeat/module/cyberarkpas/audit/test/legacysyslog.log-expected.json +++ b/x-pack/filebeat/module/cyberarkpas/audit/test/legacysyslog.log-expected.json @@ -1,6 +1,6 @@ [ { - "@timestamp": "2023-03-08T03:41:01.000-02:00", + "@timestamp": "2024-03-08T03:41:01.000-02:00", "cyberarkpas.audit.action": "Retrieve File", "cyberarkpas.audit.desc": "Retrieve File", "cyberarkpas.audit.file": "Root\\Policies\\Policy-BusinessWebsite.ini", From 3bc0d1f7cbdf7611e50512d9aa8cfaaf5139a06c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Jan 2024 17:09:07 +0000 Subject: [PATCH 022/129] Bump urllib3 from 1.26.5 to 1.26.18 in /libbeat/tests/system (#37033) Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.5 to 1.26.18. - [Release notes](https://github.com/urllib3/urllib3/releases) - [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst) - [Commits](https://github.com/urllib3/urllib3/compare/1.26.5...1.26.18) --- updated-dependencies: - dependency-name: urllib3 dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- libbeat/tests/system/requirements.txt | 2 +- libbeat/tests/system/requirements_aix.txt | 2 +- pytest.ini | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/libbeat/tests/system/requirements.txt b/libbeat/tests/system/requirements.txt index 96e0245d89b1..11442585fdc9 100644 --- a/libbeat/tests/system/requirements.txt +++ b/libbeat/tests/system/requirements.txt @@ -46,7 +46,7 @@ stomp.py==4.1.22 termcolor==1.1.0 texttable==0.9.1 toml==0.10.1 -urllib3==1.26.5 +urllib3==1.26.18 wcwidth==0.2.5 websocket-client==0.47.0 zipp>=1.2.0,<=3.1.0 diff --git a/libbeat/tests/system/requirements_aix.txt b/libbeat/tests/system/requirements_aix.txt index 7bcd677f73d4..a0b2b0025887 100644 --- a/libbeat/tests/system/requirements_aix.txt +++ b/libbeat/tests/system/requirements_aix.txt @@ -45,7 +45,7 @@ stomp.py==4.1.22 termcolor==1.1.0 texttable==0.9.1 toml==0.10.1 -urllib3==1.26.5 +urllib3==1.26.18 wcwidth==0.2.5 websocket-client==0.47.0 zipp>=1.2.0,<=3.1.0 diff --git a/pytest.ini b/pytest.ini index 18f364eb7953..5112e7736685 100644 --- a/pytest.ini +++ b/pytest.ini @@ -16,3 +16,4 @@ filterwarnings = # Ignore distutil Version class deprecation in the compose package until it can be upgraded not to use them. ignore:distutils Version classes are deprecated. Use packaging.version instead.:DeprecationWarning:.*compose.* ignore:distutils Version classes are deprecated. Use packaging.version instead.:DeprecationWarning:.*docker.* + ignore:HTTPResponse.getheaders\(\) is deprecated and will be removed in urllib3 v2.1.0. 
Instead access HTTPResponse.headers directly.:DeprecationWarning From 6f1389903869b7e77843c0487f0f91d30ea49acb Mon Sep 17 00:00:00 2001 From: Giuseppe Santoro Date: Thu, 4 Jan 2024 17:48:48 +0000 Subject: [PATCH 023/129] ignore prometheus parsing errors (#37383) * ignore prometheus parsing errors --- CHANGELOG.next.asciidoc | 1 + metricbeat/helper/openmetrics/openmetrics.go | 2 +- metricbeat/helper/prometheus/prometheus.go | 2 +- .../helper/prometheus/prometheus_test.go | 71 ++++++++++++++++ metricbeat/helper/prometheus/textparse.go | 26 +++++- .../helper/prometheus/textparse_test.go | 81 ++++++++++++++++--- 6 files changed, 165 insertions(+), 18 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index c3ff88d0c8ac..1f1f3e359353 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -116,6 +116,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix unintended skip in metric collection on Azure Monitor {issue}37204[37204] {pull}37203[37203] - Fix the "api-version query parameter (?api-version=) is required for all requests" error in Azure Billing. {pull}37158[37158] - Add memory hard limit from container metadata and remove usage percentage in AWS Fargate. 
{pull}37194[37194] +- Ignore parser errors from unsupported metrics types on Prometheus client and continue parsing until EOF is reached {pull}37383[37383] *Osquerybeat* diff --git a/metricbeat/helper/openmetrics/openmetrics.go b/metricbeat/helper/openmetrics/openmetrics.go index f4714c56f74a..5a0afaf2dba4 100644 --- a/metricbeat/helper/openmetrics/openmetrics.go +++ b/metricbeat/helper/openmetrics/openmetrics.go @@ -107,7 +107,7 @@ func (p *openmetrics) GetFamilies() ([]*prometheus.MetricFamily, error) { if err != nil { return nil, fmt.Errorf("failed to read response: %w", err) } - families, err := prometheus.ParseMetricFamilies(b, contentType, appendTime) + families, err := prometheus.ParseMetricFamilies(b, contentType, appendTime, p.logger) if err != nil { return nil, fmt.Errorf("failed to parse families: %w", err) } diff --git a/metricbeat/helper/prometheus/prometheus.go b/metricbeat/helper/prometheus/prometheus.go index 0cfab2dafe28..686bd97b9c48 100644 --- a/metricbeat/helper/prometheus/prometheus.go +++ b/metricbeat/helper/prometheus/prometheus.go @@ -105,7 +105,7 @@ func (p *prometheus) GetFamilies() ([]*MetricFamily, error) { if err != nil { return nil, fmt.Errorf("failed to read response: %w", err) } - families, err := ParseMetricFamilies(b, contentType, appendTime) + families, err := ParseMetricFamilies(b, contentType, appendTime, p.logger) if err != nil { return nil, fmt.Errorf("failed to parse families: %w", err) } diff --git a/metricbeat/helper/prometheus/prometheus_test.go b/metricbeat/helper/prometheus/prometheus_test.go index b135f0683d6a..9f7fc8a34a42 100644 --- a/metricbeat/helper/prometheus/prometheus_test.go +++ b/metricbeat/helper/prometheus/prometheus_test.go @@ -63,6 +63,14 @@ histogram_decimal_metric_bucket{le="+Inf"} 5 histogram_decimal_metric_sum 4.31 histogram_decimal_metric_count 5 +` + + promInfoMetrics = ` +# TYPE target info +target_info 1 +# TYPE first_metric gauge 
+first_metric{label1="value1",label2="value2",label3="Value3",label4="FOO"} 1 + ` promGaugeKeyLabel = ` @@ -530,6 +538,69 @@ func TestPrometheus(t *testing.T) { } } +// NOTE: if the content type = text/plain prometheus doesn't support Info metrics +// with the current implementation, info metrics should just be ignored and all other metrics +// correctly processed +func TestInfoMetricPrometheus(t *testing.T) { + + p := &prometheus{mockFetcher{response: promInfoMetrics}, logp.NewLogger("test")} + + tests := []struct { + mapping *MetricsMapping + msg string + expected []mapstr.M + }{ + { + msg: "Ignore metrics not in mapping", + mapping: &MetricsMapping{ + Metrics: map[string]MetricMap{ + "first_metric": Metric("first.metric"), + }, + }, + expected: []mapstr.M{ + mapstr.M{ + "first": mapstr.M{ + "metric": 1.0, + }, + }, + }, + }, + { + msg: "Ignore metric in mapping but of unsupported type (eg. Info metric)", + mapping: &MetricsMapping{ + Metrics: map[string]MetricMap{ + "first_metric": Metric("first.metric"), + "target_info": Metric("target.info"), + }, + }, + expected: []mapstr.M{ + mapstr.M{ + "first": mapstr.M{ + "metric": 1.0, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.msg, func(t *testing.T) { + reporter := &mbtest.CapturingReporterV2{} + _ = p.ReportProcessedMetrics(test.mapping, reporter) + assert.Nil(t, reporter.GetErrors(), test.msg) + // Sort slice to avoid randomness + res := reporter.GetEvents() + sort.Slice(res, func(i, j int) bool { + return res[i].MetricSetFields.String() < res[j].MetricSetFields.String() + }) + assert.Equal(t, len(test.expected), len(res)) + for j, ev := range res { + assert.Equal(t, test.expected[j], ev.MetricSetFields, test.msg) + } + }) + } +} + func TestPrometheusKeyLabels(t *testing.T) { testCases := []struct { diff --git a/metricbeat/helper/prometheus/textparse.go b/metricbeat/helper/prometheus/textparse.go index 4ce573ca6abd..4dca85c3aa5a 100644 --- a/metricbeat/helper/prometheus/textparse.go +++ 
b/metricbeat/helper/prometheus/textparse.go @@ -18,6 +18,8 @@ package prometheus import ( + "errors" + "io" "math" "mime" "net/http" @@ -30,6 +32,8 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/textparse" "github.com/prometheus/prometheus/pkg/timestamp" + + "github.com/elastic/elastic-agent-libs/logp" ) const ( @@ -476,7 +480,7 @@ func histogramMetricName(name string, s float64, qv string, lbls string, t *int6 return name, metric } -func ParseMetricFamilies(b []byte, contentType string, ts time.Time) ([]*MetricFamily, error) { +func ParseMetricFamilies(b []byte, contentType string, ts time.Time, logger *logp.Logger) ([]*MetricFamily, error) { var ( parser = textparse.New(b, contentType) defTime = timestamp.FromTime(ts) @@ -495,8 +499,24 @@ func ParseMetricFamilies(b []byte, contentType string, ts time.Time) ([]*MetricF e exemplar.Exemplar ) if et, err = parser.Next(); err != nil { - // TODO: log here - // if errors.Is(err, io.EOF) {} + if strings.HasPrefix(err.Error(), "invalid metric type") { + logger.Debugf("Ignored invalid metric type : %v ", err) + + // NOTE: ignore any errors that are not EOF. This is to avoid breaking the parsing. + // if acceptHeader in the prometheus client is `Accept: text/plain; version=0.0.4` (like it is now) + // any `info` metrics are not supported, and then there will be ignored here. + // if acceptHeader in the prometheus client `Accept: application/openmetrics-text; version=0.0.1` + // any `info` metrics are supported, and then there will be parsed here. 
+ continue + } + + if errors.Is(err, io.EOF) { + break + } + if strings.HasPrefix(err.Error(), "data does not end with # EOF") { + break + } + logger.Debugf("Error while parsing metrics: %v ", err) break } switch et { diff --git a/metricbeat/helper/prometheus/textparse_test.go b/metricbeat/helper/prometheus/textparse_test.go index 59b6b8ce1999..cd76e14691f1 100644 --- a/metricbeat/helper/prometheus/textparse_test.go +++ b/metricbeat/helper/prometheus/textparse_test.go @@ -23,6 +23,8 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent-libs/logp" ) func stringp(x string) *string { @@ -86,7 +88,7 @@ metric_without_suffix 10 }, } - result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now()) + result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now(), nil) if err != nil { t.Fatalf("ParseMetricFamilies for content type %s returned an error.", OpenMetricsType) } @@ -135,7 +137,7 @@ process_cpu 20 }, } - result, err := ParseMetricFamilies([]byte(input), ContentTypeTextFormat, time.Now()) + result, err := ParseMetricFamilies([]byte(input), ContentTypeTextFormat, time.Now(), nil) if err != nil { t.Fatalf("ParseMetricFamilies for content type %s returned an error.", ContentTypeTextFormat) } @@ -189,7 +191,7 @@ second_metric 0 }, } - result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now()) + result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now(), nil) if err != nil { t.Fatalf("ParseMetricFamilies for content type %s returned an error.", OpenMetricsType) } @@ -242,7 +244,7 @@ second_metric 0 }, } - result, err := ParseMetricFamilies([]byte(input[1:]), ContentTypeTextFormat, time.Now()) + result, err := ParseMetricFamilies([]byte(input[1:]), ContentTypeTextFormat, time.Now(), nil) if err != nil { t.Fatalf("ParseMetricFamilies for content type %s returned an error.", ContentTypeTextFormat) } @@ -292,7 
+294,60 @@ metric_without_suffix 3 }, } - result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now()) + result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now(), nil) + if err != nil { + t.Fatalf("ParseMetricFamilies for content type %s returned an error.", OpenMetricsType) + } + require.ElementsMatch(t, expected, result) +} + +func TestInfoPrometheus(t *testing.T) { + input := ` +# TYPE target info +target_info 1 +# TYPE first_metric gauge +first_metric{label1="value1"} 1 +# EOF +` + expected := []*MetricFamily{ + { + Name: stringp("target_info"), + Help: nil, + Type: "unknown", + Unit: nil, + Metric: []*OpenMetric{ + { + Label: []*labels.Label{}, + Name: stringp("target_info"), + Unknown: &Unknown{ + Value: float64p(1), + }, + }, + }, + }, + { + Name: stringp("first_metric"), + Help: nil, + Type: "gauge", + Unit: nil, + Metric: []*OpenMetric{ + { + Label: []*labels.Label{ + { + Name: "label1", + Value: "value1", + }, + }, + Name: stringp("first_metric"), + Gauge: &Gauge{ + Value: float64p(1), + }, + }, + }, + }, + } + + result, err := ParseMetricFamilies([]byte(input), ContentTypeTextFormat, time.Now(), logp.NewLogger("test")) if err != nil { t.Fatalf("ParseMetricFamilies for content type %s returned an error.", OpenMetricsType) } @@ -342,7 +397,7 @@ a{a="foo"} 1.0 }, } - result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now()) + result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now(), nil) if err != nil { t.Fatalf("ParseMetricFamilies for content type %s returned an error.", OpenMetricsType) } @@ -395,7 +450,7 @@ summary_metric_impossible 123 }, } - result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now()) + result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now(), nil) if err != nil { t.Fatalf("ParseMetricFamilies for content type %s returned an error.", OpenMetricsType) } @@ -447,7 +502,7 @@ 
summary_metric_impossible 123 }, } - result, err := ParseMetricFamilies([]byte(input), ContentTypeTextFormat, time.Now()) + result, err := ParseMetricFamilies([]byte(input), ContentTypeTextFormat, time.Now(), nil) if err != nil { t.Fatalf("ParseMetricFamilies for content type %s returned an error.", ContentTypeTextFormat) } @@ -507,7 +562,7 @@ http_server_requests_seconds_created{exception="None",uri="/actuator/prometheus" }, } - result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now()) + result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now(), nil) if err != nil { t.Fatalf("ParseMetricFamilies for content type %s returned an error.", OpenMetricsType) } @@ -566,7 +621,7 @@ http_server_requests_seconds_created{exception="None",uri="/actuator/prometheus" }, } - result, err := ParseMetricFamilies([]byte(input), ContentTypeTextFormat, time.Now()) + result, err := ParseMetricFamilies([]byte(input), ContentTypeTextFormat, time.Now(), nil) if err != nil { t.Fatalf("ParseMetricFamilies for content type %s returned an error.", ContentTypeTextFormat) } @@ -609,7 +664,7 @@ ggh 99 }, } - result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now()) + result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now(), nil) if err != nil { t.Fatalf("ParseMetricFamilies for content type %s returned an error.", OpenMetricsType) } @@ -644,7 +699,7 @@ redis_connected_clients{instance="rough-snowflake-web"} 10.0 }, }, } - result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now()) + result, err := ParseMetricFamilies([]byte(input[1:]), OpenMetricsType, time.Now(), nil) if err != nil { t.Fatalf("ParseMetricFamilies for content type %s returned an error.", OpenMetricsType) } @@ -678,7 +733,7 @@ redis_connected_clients{instance="rough-snowflake-web"} 10.0` }, }, } - result, err := ParseMetricFamilies([]byte(input), ContentTypeTextFormat, time.Now()) + result, err := 
ParseMetricFamilies([]byte(input), ContentTypeTextFormat, time.Now(), nil) if err != nil { t.Fatalf("ParseMetricFamilies for content type %s returned an error.", ContentTypeTextFormat) } From bcee0e9655db2a3509203c30c86d86665b6c9417 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Jan 2024 13:39:19 -0500 Subject: [PATCH 024/129] build(deps): bump github.com/elastic/elastic-agent-libs from 0.7.2 to 0.7.3 (#37544) * build(deps): bump github.com/elastic/elastic-agent-libs Bumps [github.com/elastic/elastic-agent-libs](https://github.com/elastic/elastic-agent-libs) from 0.7.2 to 0.7.3. - [Release notes](https://github.com/elastic/elastic-agent-libs/releases) - [Commits](https://github.com/elastic/elastic-agent-libs/compare/v0.7.2...v0.7.3) --- updated-dependencies: - dependency-name: github.com/elastic/elastic-agent-libs dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update NOTICE.txt * Add changelog. --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Craig MacKenzie --- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 8 ++++---- go.mod | 4 ++-- go.sum | 8 ++++---- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 1f1f3e359353..1d2803e09334 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -152,6 +152,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will disable the netinfo.enabled option of add_host_metadata processor - Upgrade to Go 1.20.12. {pull}37350[37350] - The Elasticsearch output can now configure performance presets with the `preset` configuration field. 
{pull}37259[37259] +- Upgrade to elastic-agent-libs v0.7.3 and golang.org/x/crypto v0.17.0. {pull}37544[37544] *Auditbeat* diff --git a/NOTICE.txt b/NOTICE.txt index 192819bb6e72..1f4940889c6a 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -12712,11 +12712,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.7.2 +Version: v0.7.3 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.7.2/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.7.3/LICENSE: Apache License Version 2.0, January 2004 @@ -24663,11 +24663,11 @@ THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : golang.org/x/crypto -Version: v0.16.0 +Version: v0.17.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.16.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.17.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. 
diff --git a/go.mod b/go.mod index e4943105c6a4..4515f81a31ab 100644 --- a/go.mod +++ b/go.mod @@ -151,7 +151,7 @@ require ( go.uber.org/atomic v1.11.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.16.0 + golang.org/x/crypto v0.17.0 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 golang.org/x/mod v0.14.0 golang.org/x/net v0.19.0 @@ -201,7 +201,7 @@ require ( github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5 github.com/elastic/bayeux v1.0.5 github.com/elastic/elastic-agent-autodiscover v0.6.6 - github.com/elastic/elastic-agent-libs v0.7.2 + github.com/elastic/elastic-agent-libs v0.7.3 github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 github.com/elastic/elastic-agent-system-metrics v0.9.1 github.com/elastic/go-elasticsearch/v8 v8.11.1 diff --git a/go.sum b/go.sum index c83841bdcb59..baf769cc681c 100644 --- a/go.sum +++ b/go.sum @@ -656,8 +656,8 @@ github.com/elastic/elastic-agent-autodiscover v0.6.6 h1:P1y0dDpbhJc7Uw/xe85irPEa github.com/elastic/elastic-agent-autodiscover v0.6.6/go.mod h1:chulyCAyZb/njMHgzkhC/yWnt8v/Y6eCRUhmFVnsA5o= github.com/elastic/elastic-agent-client/v7 v7.6.0 h1:FEn6FjzynW4TIQo5G096Tr7xYK/P5LY9cSS6wRbXZTc= github.com/elastic/elastic-agent-client/v7 v7.6.0/go.mod h1:GlUKrbVd/O1CRAZonpBeN3J0RlVqP6VGcrBjFWca+aM= -github.com/elastic/elastic-agent-libs v0.7.2 h1:yT0hF0UAxJCdQqhHh6SFpgYrcpB10oFzPj8IaytPS2o= -github.com/elastic/elastic-agent-libs v0.7.2/go.mod h1:pVBEElQJUO9mr4WStWNXuQGsJn54lcjAoYAHmsvBLBc= +github.com/elastic/elastic-agent-libs v0.7.3 h1:tc6JDXYR+2XFMHJVv+7+M0OwAbZPxm3caLJEd943dlE= +github.com/elastic/elastic-agent-libs v0.7.3/go.mod h1:9hlSaDPm0XTrUWrZjwvckgov1pDHnsGyybzAjNe/1wA= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 h1:sb+25XJn/JcC9/VL8HX4r4QXSUq4uTNzGS2kxOE7u1U= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3/go.mod 
h1:rWarFM7qYxJKsi9WcV6ONcFjH/NA3niDNpTxO+8/GVI= github.com/elastic/elastic-agent-system-metrics v0.9.1 h1:r0ofKHgPpl+W09ie7tzGcCDC0d4NZbQUv37rSgHf4FM= @@ -2030,8 +2030,8 @@ golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= From 7a3a9f9be5900ce5d77053a701d264355ddf98bf Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 4 Jan 2024 17:21:40 -0500 Subject: [PATCH 025/129] [updatecli] update elastic stack version for testing 8.13.0-ybxdr713 (#37502) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update 
snapshot.yml Made with ❤️️ by updatecli --------- Co-authored-by: apmmachine Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: Craig MacKenzie --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 525657c20fd1..a52226b47855 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-vqaxdghw-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-ybxdr713-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-vqaxdghw-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-ybxdr713-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-vqaxdghw-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-ybxdr713-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From d1b32774aef47110f24c880e7cc3cff38878501b Mon Sep 17 00:00:00 2001 From: Craig MacKenzie Date: Fri, 5 Jan 2024 09:14:43 -0500 Subject: [PATCH 026/129] Update to Go 1.21.5 (#37550) * Upgrade to Go 1.21.5. * Update to latest linter. * Update changelog * Update golangci.yml. 
--- .github/workflows/golangci-lint.yml | 2 +- .go-version | 2 +- .golangci.yml | 10 +++++----- CHANGELOG.next.asciidoc | 2 +- auditbeat/Dockerfile | 2 +- dev-tools/kubernetes/filebeat/Dockerfile.debug | 2 +- dev-tools/kubernetes/heartbeat/Dockerfile.debug | 2 +- dev-tools/kubernetes/metricbeat/Dockerfile.debug | 2 +- go.mod | 2 +- go.sum | 9 +++++++++ heartbeat/Dockerfile | 2 +- libbeat/docs/version.asciidoc | 2 +- metricbeat/Dockerfile | 2 +- metricbeat/module/http/_meta/Dockerfile | 2 +- metricbeat/module/nats/_meta/Dockerfile | 2 +- metricbeat/module/vsphere/_meta/Dockerfile | 2 +- packetbeat/Dockerfile | 2 +- x-pack/functionbeat/Dockerfile | 2 +- x-pack/metricbeat/module/stan/_meta/Dockerfile | 2 +- 19 files changed, 31 insertions(+), 22 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index c8e871ce7720..140b9ce302ea 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -39,7 +39,7 @@ jobs: uses: golangci/golangci-lint-action@v3 with: # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version - version: v1.51.2 + version: v1.55.2 # Give the job more time to execute. # Regarding `--whole-files`, the linter is supposed to support linting of changed a patch only but, diff --git a/.go-version b/.go-version index 3b9e4a0c187a..ce2dd53570bb 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.20.12 +1.21.5 diff --git a/.golangci.yml b/.golangci.yml index 3cc6336695e3..9e1b7636436b 100755 --- a/.golangci.yml +++ b/.golangci.yml @@ -2,7 +2,7 @@ run: # timeout for analysis, e.g. 30s, 5m, default is 1m timeout: 15m - build-tags: + build-tags: - synthetics - integration @@ -114,7 +114,7 @@ linters-settings: gosimple: # Select the Go version to target. The default is '1.13'. 
- go: "1.20.12" + go: "1.21.5" nakedret: # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 @@ -132,19 +132,19 @@ linters-settings: staticcheck: # Select the Go version to target. The default is '1.13'. - go: "1.20.12" + go: "1.21.5" checks: ["all"] stylecheck: # Select the Go version to target. The default is '1.13'. - go: "1.20.12" + go: "1.21.5" # Disabled: # ST1005: error strings should not be capitalized checks: ["all", "-ST1005"] unused: # Select the Go version to target. The default is '1.13'. - go: "1.20.12" + go: "1.21.5" gosec: excludes: diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 1d2803e09334..76540fbc48c3 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -150,7 +150,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - elasticsearch output now supports `idle_connection_timeout`. {issue}35616[35615] {pull}36843[36843] - Upgrade golang/x/net to v0.17.0. Updates the publicsuffix table used by the registered_domain processor. {pull}36969[36969] Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will disable the netinfo.enabled option of add_host_metadata processor -- Upgrade to Go 1.20.12. {pull}37350[37350] +- Upgrade to Go 1.21.5. {pull}37550[37550] - The Elasticsearch output can now configure performance presets with the `preset` configuration field. {pull}37259[37259] - Upgrade to elastic-agent-libs v0.7.3 and golang.org/x/crypto v0.17.0. 
{pull}37544[37544] diff --git a/auditbeat/Dockerfile b/auditbeat/Dockerfile index 742041d66af8..a43c12d013d4 100644 --- a/auditbeat/Dockerfile +++ b/auditbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.12 +FROM golang:1.21.5 RUN \ apt-get update \ diff --git a/dev-tools/kubernetes/filebeat/Dockerfile.debug b/dev-tools/kubernetes/filebeat/Dockerfile.debug index c0c8768861cc..8a77046e6657 100644 --- a/dev-tools/kubernetes/filebeat/Dockerfile.debug +++ b/dev-tools/kubernetes/filebeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.20.12 as builder +FROM golang:1.21.5 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/dev-tools/kubernetes/heartbeat/Dockerfile.debug b/dev-tools/kubernetes/heartbeat/Dockerfile.debug index aa48a8d58d7e..d10e04d0ceaa 100644 --- a/dev-tools/kubernetes/heartbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/heartbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.20.12 as builder +FROM golang:1.21.5 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/dev-tools/kubernetes/metricbeat/Dockerfile.debug b/dev-tools/kubernetes/metricbeat/Dockerfile.debug index 854cf1ac32be..d6a0055f7f6b 100644 --- a/dev-tools/kubernetes/metricbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/metricbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.20.12 as builder +FROM golang:1.21.5 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/go.mod b/go.mod index 4515f81a31ab..786e0cdd8b57 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/elastic/beats/v7 -go 1.20 +go 1.21 require ( cloud.google.com/go/bigquery v1.52.0 diff --git a/go.sum b/go.sum index baf769cc681c..1fa1bc366e26 100644 --- a/go.sum +++ b/go.sum @@ -41,11 +41,13 @@ cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdi cloud.google.com/go/compute/metadata v0.2.3 
h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datacatalog v1.14.1 h1:cFPBt8V5V2T3mu/96tc4nhcMB+5cYcpwjBfn79bZDI8= +cloud.google.com/go/datacatalog v1.14.1/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/kms v1.12.1 h1:xZmZuwy2cwzsocmKDOPu4BL7umg8QXagQx6fKVmf45U= +cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI= cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= cloud.google.com/go/monitoring v1.15.1 h1:65JhLMd+JiYnXr6j5Z63dUYCuOg770p8a/VC+gil/58= @@ -552,6 +554,7 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= @@ -750,6 +753,7 @@ github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVB github.com/franela/goreq 
v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/gabriel-vasile/mimetype v1.4.1/go.mod h1:05Vi0w3Y9c/lNvJOdmIwvrrAhX3rYhfQQCaf9VJcv7M= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= @@ -1060,6 +1064,7 @@ github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIG github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -1075,6 +1080,7 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20230426061923-93006964c1fc h1:AGDHt781oIcL4EFk7cPnvBUYTwU8BEU6GDTO3ZMn1sE= +github.com/google/pprof 
v0.0.0-20230426061923-93006964c1fc/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= @@ -1573,6 +1579,7 @@ github.com/osquery/osquery-go v0.0.0-20231108163517-e3cde127e724/go.mod h1:mLJRc github.com/otiai10/copy v1.12.0 h1:cLMgSQnXBs1eehF0Wy/FAGsgDTDmAqFR7rQylBb1nDY= github.com/otiai10/copy v1.12.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww= github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= +github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= @@ -1972,6 +1979,7 @@ go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -2710,6 +2718,7 @@ gotest.tools/gotestsum v1.7.0/go.mod h1:V1m4Jw3eBerhI/A6qCxUE07RnCg7ACkKj9BYcAm0 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod 
h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/heartbeat/Dockerfile b/heartbeat/Dockerfile index e3e4a8bf989c..db338a4089ca 100644 --- a/heartbeat/Dockerfile +++ b/heartbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.12 +FROM golang:1.21.5 RUN \ apt-get update \ diff --git a/libbeat/docs/version.asciidoc b/libbeat/docs/version.asciidoc index ccb198c4a924..098dee31e9df 100644 --- a/libbeat/docs/version.asciidoc +++ b/libbeat/docs/version.asciidoc @@ -1,6 +1,6 @@ :stack-version: 8.13.0 :doc-branch: main -:go-version: 1.20.12 +:go-version: 1.21.5 :release-state: unreleased :python: 3.7 :docker: 1.12 diff --git a/metricbeat/Dockerfile b/metricbeat/Dockerfile index 634db34245b7..4ed0762fc4cb 100644 --- a/metricbeat/Dockerfile +++ b/metricbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.12 +FROM golang:1.21.5 RUN \ apt update \ diff --git a/metricbeat/module/http/_meta/Dockerfile b/metricbeat/module/http/_meta/Dockerfile index bf7a9fc931ec..1b08a63aab50 100644 --- a/metricbeat/module/http/_meta/Dockerfile +++ b/metricbeat/module/http/_meta/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.12 +FROM golang:1.21.5 COPY test/main.go main.go diff --git a/metricbeat/module/nats/_meta/Dockerfile b/metricbeat/module/nats/_meta/Dockerfile index 44fdc695748e..b3f2a06d8253 100644 --- a/metricbeat/module/nats/_meta/Dockerfile +++ b/metricbeat/module/nats/_meta/Dockerfile @@ -2,7 +2,7 @@ ARG NATS_VERSION=2.0.4 FROM nats:$NATS_VERSION # build stage -FROM golang:1.20.12 AS build-env +FROM golang:1.21.5 AS 
build-env RUN apt-get install git mercurial gcc RUN git clone https://github.com/nats-io/nats.go.git /nats-go RUN cd /nats-go/examples/nats-bench && git checkout tags/v1.10.0 && go build . diff --git a/metricbeat/module/vsphere/_meta/Dockerfile b/metricbeat/module/vsphere/_meta/Dockerfile index 993137b89f3a..601d8317ac2c 100644 --- a/metricbeat/module/vsphere/_meta/Dockerfile +++ b/metricbeat/module/vsphere/_meta/Dockerfile @@ -1,5 +1,5 @@ ARG VSPHERE_GOLANG_VERSION -FROM golang:1.20.12 +FROM golang:1.21.5 RUN apt-get install curl git RUN go install github.com/vmware/govmomi/vcsim@v0.30.4 diff --git a/packetbeat/Dockerfile b/packetbeat/Dockerfile index fe13ef47b61f..d3718ecfcdb5 100644 --- a/packetbeat/Dockerfile +++ b/packetbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.12 +FROM golang:1.21.5 RUN \ apt-get update \ diff --git a/x-pack/functionbeat/Dockerfile b/x-pack/functionbeat/Dockerfile index 662b27d669db..196dd8d5f234 100644 --- a/x-pack/functionbeat/Dockerfile +++ b/x-pack/functionbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.12 +FROM golang:1.21.5 RUN \ apt-get update \ diff --git a/x-pack/metricbeat/module/stan/_meta/Dockerfile b/x-pack/metricbeat/module/stan/_meta/Dockerfile index 92ee1d834571..40162f9181b5 100644 --- a/x-pack/metricbeat/module/stan/_meta/Dockerfile +++ b/x-pack/metricbeat/module/stan/_meta/Dockerfile @@ -2,7 +2,7 @@ ARG STAN_VERSION=0.15.1 FROM nats-streaming:$STAN_VERSION # build stage -FROM golang:1.20.12 AS build-env +FROM golang:1.21.5 AS build-env RUN apt-get install git mercurial gcc RUN git clone https://github.com/nats-io/stan.go.git /stan-go RUN cd /stan-go/examples/stan-bench && git checkout tags/v0.5.2 && go build . 
From 824dd04debeea12a34408bb5840609d1a57222a2 Mon Sep 17 00:00:00 2001 From: Maurizio Branca Date: Fri, 5 Jan 2024 17:30:24 +0100 Subject: [PATCH 027/129] Fix the reference time rounding on Azure Metrics (#37365) ### What Change the `MetricRegistry.NeedsUpdate()` method to decide whether to collect the metrics by comparing the collection interval with the time grain. If the time since the last collection < time grain duration, then the metrics skip the collection. For example, given the following scenario: #### Scenario A: collect PT1M metrics every 60s - time grain: PT1M (one minute, or 60s) - collection interval: 60s In this case, the time since the last collection is never shorter than the time grain, so the metricset fetch metric values on every collection. #### Scenario B: collect PT15M metrics every 60s - time grain: PT5M (five minutes, or 300s) - collection interval: 60s In this case, the time since the last collection is shorter (60s, 120s, 180s, 240s) than the time grain for four collections. The metricset fetch metric values every five collections. #### The jitter During our tests, we noticed the collection scheduling had some variations, causing the time since the last collection to be shorter than expected by a few milliseconds. To compensate for these scheduling fluctuations, the function also adds a short jitter duration (1 second) to avoid false positives due to small fluctuations in collection scheduling. ### Why During a testing session on 8.11.2, we [noticed](https://github.com/elastic/beats/issues/37204#issuecomment-1847023185) one out of four agents skipped some metrics collections. The debug logs revealed the metricset skipped collections due to a 1-second difference between the reference time for the current and previous collections (299s instead of 300s). 
![CleanShot 2023-12-08 at 20 13 19](https://github.com/elastic/beats/assets/25941/dc3d5040-c89b-47d2-a86a-124eb838ca36) The 1-second difference may happen due to an inaccurate rounding in the reference time. For example, suppose the following two events occur: 1. Metricbeat calls `Fetch()` on the metricset a few milliseconds earlier than in the previous collection. 2. The timestamp is 2023-12-08T10:58:32.999Z. In this case, the reference time becomes 2023-12-08T10:58:32.000Z due to the truncation. This problem happened to one test agent. However, if it happens to one agent, it can happen to others. ### Extended Structured Logging We also added new fields to the debug structured logs: ```shell $ cat metricbeat.log.ndjson | grep "MetricRegistry" | head -n 1 | jq { "log.level": "debug", "@timestamp": "2024-01-05T15:03:12.235+0100", "log.logger": "azure monitor client", "log.origin": { "function": "github.com/elastic/beats/v7/x-pack/metricbeat/module/azure.(*MetricRegistry).NeedsUpdate", "file.name": "azure/metric_registry.go", "file.line": 80 }, "message": "MetricRegistry: Metric needs an update", "service.name": "metricbeat", "needs_update": true, "reference_time": "2024-01-05T14:03:07.197Z", "last_collection_time": "2024-01-05T14:02:07.199Z", "time_since_last_collection_seconds": 66.035681, "time_grain": "PT1M", "time_grain_duration_seconds": 60, "resource_id": "/subscriptions/123/resourceGroups/crest-test-lens-migration/providers/Microsoft.Compute/virtualMachines/rajvi-test-vm", "namespace": "Microsoft.Compute/virtualMachines", "aggregation": "Total", "names": "Network In,Network Out,Disk Read Bytes,Disk Write Bytes,Network In Total,Network Out Total", "ecs.version": "1.6.0" } ``` Here's an example using `jq`: ```shell $ cat metricbeat.log.ndjson | grep "MetricRegistry" | jq -r '[.namespace, .aggregation, .needs_update, .reference_time, .last_collection_time//"na", .time_since_last_collection_seconds//"na", .time_grain_duration_seconds//"na", .time_grain] | @tsv' | 
grep Microsoft.Compute/virtualMachines .aggregation aggregation .needs_update .reference_time .last_collection_time time_since_last_collection_seconds .time_grain_duration_seconds .time_grain Microsoft.Compute/virtualMachines Average true 2024-01-05T14:16:07.193Z 2024-01-05T14:15:07.193Z 60.999661 60 PT1M Microsoft.Compute/virtualMachines Total true 2024-01-05T14:16:07.193Z 2024-01-05T14:15:07.193Z 61.795341 60 PT1M Microsoft.Compute/virtualMachines Average true 2024-01-05T14:16:07.193Z 2024-01-05T14:15:07.193Z 62.080088 60 PT1M Microsoft.Compute/virtualMachines Total true 2024-01-05T14:16:07.193Z 2024-01-05T14:15:07.193Z 64.929579 60 PT1M Microsoft.Compute/virtualMachines Average true 2024-01-05T14:16:07.193Z 2024-01-05T14:15:07.193Z 65.632209 60 PT1M Microsoft.Compute/virtualMachines Total true 2024-01-05T14:16:07.193Z 2024-01-05T14:15:07.193Z 67.832918 60 PT1M Microsoft.Compute/virtualMachines Average true 2024-01-05T14:16:07.193Z 2024-01-05T14:15:07.193Z 68.576239 60 PT1M Microsoft.Compute/virtualMachines Total true 2024-01-05T14:16:07.193Z 2024-01-05T14:15:07.193Z 69.927988 60 PT1M Microsoft.Compute/virtualMachines Total true 2024-01-05T14:16:07.193Z 2024-01-05T14:15:07.193Z 70.351148 60 PT1M Microsoft.Compute/virtualMachines Average true 2024-01-05T14:16:07.193Z 2024-01-05T14:15:07.193Z 70.872058 60 PT1M Microsoft.Compute/virtualMachines Average true 2024-01-05T14:16:07.193Z 2024-01-05T14:15:07.193Z 72.47401 60 PT1M Microsoft.Compute/virtualMachines Total true 2024-01-05T14:16:07.193Z 2024-01-05T14:15:07.193Z 72.971242 60 PT1M Microsoft.Compute/virtualMachines Average true 2024-01-05T14:16:07.193Z 2024-01-05T14:15:07.193Z 73.143605 60 PT1M Microsoft.Compute/virtualMachines Total true 2024-01-05T14:16:07.193Z 2024-01-05T14:15:07.193Z 74.831489 60 PT1M ``` --- CHANGELOG.next.asciidoc | 1 + x-pack/metricbeat/module/azure/azure.go | 11 +- x-pack/metricbeat/module/azure/client.go | 104 --------------- .../metricbeat/module/azure/client_utils.go | 4 +- 
.../module/azure/metric_registry.go | 125 ++++++++++++++++++ .../module/azure/metric_registry_test.go | 93 +++++++++++++ x-pack/metricbeat/module/azure/resources.go | 2 +- 7 files changed, 230 insertions(+), 110 deletions(-) create mode 100644 x-pack/metricbeat/module/azure/metric_registry.go create mode 100644 x-pack/metricbeat/module/azure/metric_registry_test.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 76540fbc48c3..6875c33bb879 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -117,6 +117,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix the "api-version query parameter (?api-version=) is required for all requests" error in Azure Billing. {pull}37158[37158] - Add memory hard limit from container metadata and remove usage percentage in AWS Fargate. {pull}37194[37194] - Ignore parser errors from unsupported metrics types on Prometheus client and continue parsing until EOF is reached {pull}37383[37383] +- Fix the reference time rounding on Azure Metrics {issue}37204[37204] {pull}37365[37365] *Osquerybeat* diff --git a/x-pack/metricbeat/module/azure/azure.go b/x-pack/metricbeat/module/azure/azure.go index 7812feed838c..dd7f121b2697 100644 --- a/x-pack/metricbeat/module/azure/azure.go +++ b/x-pack/metricbeat/module/azure/azure.go @@ -96,9 +96,14 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { // depending on metric time grain (check `MetricRegistry` // for more information). // - // We truncate the reference time to the second to avoid millisecond - // variations in the collection period causing skipped collections. - referenceTime := time.Now().UTC().Truncate(time.Second) + // We round the reference time to the nearest second to avoid + // millisecond variations in the collection period causing + // skipped collections. + // + // See "Round outer limits" and "Round inner limits" tests in + // the metric_registry_test.go for more information. 
+ //referenceTime := time.Now().UTC().Round(time.Second) + referenceTime := time.Now().UTC() // Initialize cloud resources and monitor metrics // information. diff --git a/x-pack/metricbeat/module/azure/client.go b/x-pack/metricbeat/module/azure/client.go index ce9a6cb824fc..3b22a5713cd3 100644 --- a/x-pack/metricbeat/module/azure/client.go +++ b/x-pack/metricbeat/module/azure/client.go @@ -16,110 +16,6 @@ import ( "github.com/elastic/elastic-agent-libs/logp" ) -// NewMetricRegistry instantiates a new metric registry. -func NewMetricRegistry(logger *logp.Logger) *MetricRegistry { - return &MetricRegistry{ - logger: logger, - collectionsInfo: make(map[string]MetricCollectionInfo), - } -} - -// MetricRegistry keeps track of the last time a metric was collected and -// the time grain used. -// -// This is used to avoid collecting the same metric values over and over again -// when the time grain is larger than the collection interval. -type MetricRegistry struct { - logger *logp.Logger - collectionsInfo map[string]MetricCollectionInfo -} - -// Update updates the metric registry with the latest timestamp and -// time grain for the given metric. -func (m *MetricRegistry) Update(metric Metric, info MetricCollectionInfo) { - m.collectionsInfo[m.buildMetricKey(metric)] = info -} - -// NeedsUpdate returns true if the metric needs to be collected again -// for the given `referenceTime`. -func (m *MetricRegistry) NeedsUpdate(referenceTime time.Time, metric Metric) bool { - // Build a key to store the metric in the registry. - // The key is a combination of the namespace, - // resource ID and metric names. - metricKey := m.buildMetricKey(metric) - - // Get the now time in UTC, only to be used for logging. - // It's interesting to see when the registry evaluate each - // metric in relation to the reference time. - now := time.Now().UTC() - - if collection, exists := m.collectionsInfo[metricKey]; exists { - // Turn the time grain into a duration (for example, PT5M -> 5 minutes). 
- timeGrainDuration := convertTimeGrainToDuration(collection.timeGrain) - - // Calculate the start time of the time grain in relation to - // the reference time. - timeGrainStartTime := referenceTime.Add(-timeGrainDuration) - - // If the last collection time is after the start time of the time grain, - // it means that we already have a value for the given time grain. - // - // In this case, the metricset does not need to collect the metric - // values again. - if collection.timestamp.After(timeGrainStartTime) { - m.logger.Debugw( - "MetricRegistry: Metric does not need an update", - "needs_update", false, - "reference_time", referenceTime, - "now", now, - "time_grain_start_time", timeGrainStartTime, - "last_collection_at", collection.timestamp, - ) - - return false - } - - // The last collection time is before the start time of the time grain, - // it means that the metricset needs to collect the metric values again. - m.logger.Debugw( - "MetricRegistry: Metric needs an update", - "needs_update", true, - "reference_time", referenceTime, - "now", now, - "time_grain_start_time", timeGrainStartTime, - "last_collection_at", collection.timestamp, - ) - - return true - } - - // If the metric is not in the registry, it means that it has never - // been collected before. - // - // In this case, we need to collect the metric. - m.logger.Debugw( - "MetricRegistry: Metric needs an update", - "needs_update", true, - "reference_time", referenceTime, - "now", now, - ) - - return true -} - -// buildMetricKey builds a key for the metric registry. -// -// The key is a combination of the namespace, resource ID and metric names. -func (m *MetricRegistry) buildMetricKey(metric Metric) string { - keyComponents := []string{ - metric.Namespace, - metric.ResourceId, - } - keyComponents = append(keyComponents, metric.Names...) - - return strings.Join(keyComponents, ",") -} - // MetricCollectionInfo contains information about the last time // a metric was collected and the time grain used. 
type MetricCollectionInfo struct { diff --git a/x-pack/metricbeat/module/azure/client_utils.go b/x-pack/metricbeat/module/azure/client_utils.go index 986125ba6b68..114ccd95baf2 100644 --- a/x-pack/metricbeat/module/azure/client_utils.go +++ b/x-pack/metricbeat/module/azure/client_utils.go @@ -135,14 +135,14 @@ func compareMetricValues(metVal *float64, metricVal *float64) bool { return false } -// convertTimeGrainToDuration converts the Azure time grain options to the equivalent +// asDuration converts the Azure time grain options to the equivalent // `time.Duration` value. // // For example, converts "PT1M" to `time.Minute`. // // See https://docs.microsoft.com/en-us/azure/azure-monitor/platform/metrics-supported#time-grain // for more information. -func convertTimeGrainToDuration(timeGrain string) time.Duration { +func asDuration(timeGrain string) time.Duration { var duration time.Duration switch timeGrain { case "PT1M": diff --git a/x-pack/metricbeat/module/azure/metric_registry.go b/x-pack/metricbeat/module/azure/metric_registry.go new file mode 100644 index 000000000000..cdaa9496b5d6 --- /dev/null +++ b/x-pack/metricbeat/module/azure/metric_registry.go @@ -0,0 +1,125 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package azure + +import ( + "strings" + "time" + + "github.com/elastic/elastic-agent-libs/logp" +) + +// NewMetricRegistry instantiates a new metric registry. +func NewMetricRegistry(logger *logp.Logger) *MetricRegistry { + return &MetricRegistry{ + logger: logger, + collectionsInfo: make(map[string]MetricCollectionInfo), + jitter: 1 * time.Second, + } +} + +// MetricRegistry keeps track of the last time a metric was collected and +// the time grain used. 
+// +// This is used to avoid collecting the same metric values over and over again +// when the time grain is larger than the collection interval. +type MetricRegistry struct { + logger *logp.Logger + collectionsInfo map[string]MetricCollectionInfo + // The collection period can be jittered by a second. + // We introduce a small jitter to avoid skipping collections + // when the collection period is close (usually < 1s) to the + // time grain start time. + jitter time.Duration +} + +// Update updates the metric registry with the latest timestamp and +// time grain for the given metric. +func (m *MetricRegistry) Update(metric Metric, info MetricCollectionInfo) { + m.collectionsInfo[m.buildMetricKey(metric)] = info +} + +// NeedsUpdate returns true if the metric needs to be collected again +// for the given `referenceTime`. +func (m *MetricRegistry) NeedsUpdate(referenceTime time.Time, metric Metric) bool { + // Build a key to store the metric in the registry. + // The key is a combination of the namespace, + // resource ID and metric names. + metricKey := m.buildMetricKey(metric) + + if lastCollection, exists := m.collectionsInfo[metricKey]; exists { + // Turn the time grain into a duration (for example, PT5M -> 5 minutes). + timeGrainDuration := asDuration(lastCollection.timeGrain) + + // Adjust the last collection time by adding a small jitter to avoid + // skipping collections when the collection period is close (usually < 1s). 
+ timeSinceLastCollection := time.Since(lastCollection.timestamp) + m.jitter + + if timeSinceLastCollection < timeGrainDuration { + m.logger.Debugw( + "MetricRegistry: Metric does not need an update", + "needs_update", false, + "reference_time", referenceTime, + "last_collection_time", lastCollection.timestamp, + "time_since_last_collection_seconds", timeSinceLastCollection.Seconds(), + "time_grain", lastCollection.timeGrain, + "time_grain_duration_seconds", timeGrainDuration.Seconds(), + "resource_id", metric.ResourceId, + "namespace", metric.Namespace, + "aggregation", metric.Aggregations, + "names", strings.Join(metric.Names, ","), + ) + + return false + } + + // The last collection time is before the start time of the time grain, + // it means that the metricset needs to collect the metric values again. + m.logger.Debugw( + "MetricRegistry: Metric needs an update", + "needs_update", true, + "reference_time", referenceTime, + "last_collection_time", lastCollection.timestamp, + "time_since_last_collection_seconds", timeSinceLastCollection.Seconds(), + "time_grain", lastCollection.timeGrain, + "time_grain_duration_seconds", timeGrainDuration.Seconds(), + "resource_id", metric.ResourceId, + "namespace", metric.Namespace, + "aggregation", metric.Aggregations, + "names", strings.Join(metric.Names, ","), + ) + + return true + } + + // If the metric is not in the registry, it means that it has never + // been collected before. + // + // In this case, we need to collect the metric. + m.logger.Debugw( + "MetricRegistry: Metric needs an update (no collection info in the metric registry)", + "needs_update", true, + "reference_time", referenceTime, + "resource_id", metric.ResourceId, + "namespace", metric.Namespace, + "aggregation", metric.Aggregations, + "names", strings.Join(metric.Names, ","), + ) + + return true +} + +// buildMetricKey builds a key for the metric registry. +// +// The key is a combination of the namespace, resource ID and metric names. 
+func (m *MetricRegistry) buildMetricKey(metric Metric) string { + keyComponents := []string{ + metric.Namespace, + metric.ResourceId, + } + keyComponents = append(keyComponents, metric.Names...) + + return strings.Join(keyComponents, ",") +} diff --git a/x-pack/metricbeat/module/azure/metric_registry_test.go b/x-pack/metricbeat/module/azure/metric_registry_test.go new file mode 100644 index 000000000000..a0ecdc84b85d --- /dev/null +++ b/x-pack/metricbeat/module/azure/metric_registry_test.go @@ -0,0 +1,93 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package azure + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/elastic-agent-libs/logp" +) + +func TestNewMetricRegistry(t *testing.T) { + logger := logp.NewLogger("test azure monitor") + + t.Run("Collect metrics with a regular 5 minutes period", func(t *testing.T) { + metricRegistry := NewMetricRegistry(logger) + + // Create a lastCollectionAt parsing the string 2023-12-08T16:37:50.000Z into a time.Time + lastCollectionAt, _ := time.Parse(time.RFC3339, "2023-12-08T16:37:50.000Z") + + // Create a referenceTime parsing 2023-12-08T16:42:50.000Z into a time.Time + referenceTime, _ := time.Parse(time.RFC3339, "2023-12-08T16:42:50.000Z") + + metric := Metric{ + ResourceId: "test", + Namespace: "test", + } + metricCollectionInfo := MetricCollectionInfo{ + timeGrain: "PT5M", + timestamp: lastCollectionAt, + } + + metricRegistry.Update(metric, metricCollectionInfo) + + needsUpdate := metricRegistry.NeedsUpdate(referenceTime, metric) + + assert.True(t, needsUpdate, "metric should need update") + }) + + t.Run("Collect metrics using a period 3 seconds longer than previous", func(t *testing.T) { + metricRegistry := NewMetricRegistry(logger) + + // Create a lastCollectionAt parsing the 
string 2023-12-08T16:37:50.000Z into a time.Time + lastCollectionAt, _ := time.Parse(time.RFC3339, "2023-12-08T16:37:50.000Z") + + // Create a referenceTime parsing 2023-12-08T16:42:50.000Z into a time.Time + referenceTime, _ := time.Parse(time.RFC3339, "2023-12-08T16:42:53.000Z") + + metric := Metric{ + ResourceId: "test", + Namespace: "test", + } + metricCollectionInfo := MetricCollectionInfo{ + timeGrain: "PT5M", + timestamp: lastCollectionAt, + } + + metricRegistry.Update(metric, metricCollectionInfo) + + needsUpdate := metricRegistry.NeedsUpdate(referenceTime, metric) + + assert.True(t, needsUpdate, "metric should need update") + }) + + t.Run("Collect metrics using a period (1 second) shorter than previous", func(t *testing.T) { + metricRegistry := NewMetricRegistry(logger) + + // Create a referenceTime parsing 2023-12-08T16:42:50.000Z into a time.Time + referenceTime, _ := time.Parse(time.RFC3339, "2023-12-08T10:58:33.000Z") + + // Create a lastCollectionAt parsing the string 2023-12-08T16:37:50.000Z into a time.Time + lastCollectionAt, _ := time.Parse(time.RFC3339, "2023-12-08T10:53:34.000Z") + + metric := Metric{ + ResourceId: "test", + Namespace: "test", + } + metricCollectionInfo := MetricCollectionInfo{ + timeGrain: "PT5M", + timestamp: lastCollectionAt, + } + + metricRegistry.Update(metric, metricCollectionInfo) + + needsUpdate := metricRegistry.NeedsUpdate(referenceTime, metric) + + assert.True(t, needsUpdate, "metric should not need update") + }) +} diff --git a/x-pack/metricbeat/module/azure/resources.go b/x-pack/metricbeat/module/azure/resources.go index 0a723c82bd5a..6a633663cb1c 100644 --- a/x-pack/metricbeat/module/azure/resources.go +++ b/x-pack/metricbeat/module/azure/resources.go @@ -38,7 +38,7 @@ type Metric struct { Values []MetricValue TimeGrain string ResourceId string - // ResourceSubId is used for the metric values api as namespaces can apply to sub resrouces ex. 
storage account: container, blob, vm scaleset: vms + // ResourceSubId is used for the metric values api as namespaces can apply to sub resources ex. storage account: container, blob, vm scaleset: vms ResourceSubId string } From 091da4e3cc21b04497ac6ca9d4cdd2ddfd93fd52 Mon Sep 17 00:00:00 2001 From: Denis Date: Fri, 5 Jan 2024 19:46:19 +0000 Subject: [PATCH 028/129] Add a warning about altering existing event fields by processors (#37459) This is important to mention, since some of our processors rely on a certain event schema and pre-existing fields. So, these fields should not be removed or overwritten. --- libbeat/docs/processors.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libbeat/docs/processors.asciidoc b/libbeat/docs/processors.asciidoc index f4fda6c50f7c..fc91b31a49af 100644 --- a/libbeat/docs/processors.asciidoc +++ b/libbeat/docs/processors.asciidoc @@ -25,3 +25,5 @@ order they are defined in the {beatname_uc} configuration file. ------- event -> processor 1 -> event1 -> processor 2 -> event2 ... ------- + +IMPORTANT: It's recommended to do all drop and renaming of existing fields as the last step in a processor configuration. This is because dropping or renaming fields can remove data necessary for the next processor in the chain, for example dropping the `source.ip` field would remove one of the fields necessary for the `community_id` processor to function. If it's necessary to remove, rename or overwrite an existing event field, please make sure it's done by a corresponding processor (<>, <> or <>) placed at the end of the processor list defined in the input configuration. 
From 6cb79c0b061f8663cfe1f143aacc602a923ee9e1 Mon Sep 17 00:00:00 2001 From: Gabriel Pop <94497545+gpop63@users.noreply.github.com> Date: Sun, 7 Jan 2024 21:51:31 +0200 Subject: [PATCH 029/129] [metricbeat] [containerd] group metrics by dimensions (#37537) * add grouping logic * update tests data * add changelog entry * make update * add comments * fix changelog * frange over group events * range over grouped events --- CHANGELOG.next.asciidoc | 1 + .../module/containerd/cpu/_meta/data.json | 47 ++- .../cpu/_meta/test/containerd.v1.5.2.expected | 382 ++--------------- .../_meta/testdata/docs.plain-expected.json | 392 ++---------------- .../metricbeat/module/containerd/cpu/cpu.go | 177 +++++--- 5 files changed, 221 insertions(+), 778 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 6875c33bb879..3f6c12cec990 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -238,6 +238,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - Add GCP CloudSQL metadata {pull}33066[33066] - Add GCP Carbon Footprint metricbeat data {pull}34820[34820] - Add event loop utilization metric to Kibana module {pull}35020[35020] +- Fix containerd metrics grouping for TSDB {pull}37537[37537] - Add metrics grouping by dimensions and time to Azure app insights {pull}36634[36634] - Align on the algorithm used to transform Prometheus histograms into Elasticsearch histograms {pull}36647[36647] - Enhance GCP billing with detailed tables identification, additional fields, and optimized data handling. 
{pull}36902[36902] diff --git a/x-pack/metricbeat/module/containerd/cpu/_meta/data.json b/x-pack/metricbeat/module/containerd/cpu/_meta/data.json index 73210e76e26a..439888e6f29e 100644 --- a/x-pack/metricbeat/module/containerd/cpu/_meta/data.json +++ b/x-pack/metricbeat/module/containerd/cpu/_meta/data.json @@ -7,11 +7,56 @@ "cpu": { "usage": { "cpu": { + "0": { + "ns": 99137817570 + }, + "1": { + "ns": 116475261138 + }, + "10": { + "ns": 106709905770 + }, + "11": { + "ns": 104878380370 + }, + "2": { + "ns": 105305653633 + }, + "3": { + "ns": 101195506344 + }, "4": { "ns": 105731762224 + }, + "5": { + "ns": 98155683224 + }, + "6": { + "ns": 95075348914 + }, + "7": { + "ns": 97134782770 + }, + "8": { + "ns": 104266711568 + }, + "9": { + "ns": 102272190459 } }, - "percpu": {} + "kernel": { + "ns": 532180000000, + "pct": 0 + }, + "percpu": {}, + "total": { + "ns": 1236339003984, + "pct": 0 + }, + "user": { + "ns": 525470000000, + "pct": 0 + } } }, "namespace": "k8s.io" diff --git a/x-pack/metricbeat/module/containerd/cpu/_meta/test/containerd.v1.5.2.expected b/x-pack/metricbeat/module/containerd/cpu/_meta/test/containerd.v1.5.2.expected index cf41191bb31c..f0b13c66850b 100644 --- a/x-pack/metricbeat/module/containerd/cpu/_meta/test/containerd.v1.5.2.expected +++ b/x-pack/metricbeat/module/containerd/cpu/_meta/test/containerd.v1.5.2.expected @@ -1,244 +1,4 @@ [ - { - "RootFields": { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - } - }, - "ModuleFields": { - "namespace": "k8s.io" - }, - "MetricSetFields": { - "usage": { - "cpu": { - "7": { - "ns": 97134782770 - } - }, - "percpu": {} - } - }, - "Index": "", - "ID": "", - "Namespace": "containerd.cpu", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0, - "Period": 0, - "DisableTimeSeries": false - }, - { - "RootFields": { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - } - }, - 
"ModuleFields": { - "namespace": "k8s.io" - }, - "MetricSetFields": { - "usage": { - "cpu": { - "4": { - "ns": 105731762224 - } - }, - "percpu": {} - } - }, - "Index": "", - "ID": "", - "Namespace": "containerd.cpu", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0, - "Period": 0, - "DisableTimeSeries": false - }, - { - "RootFields": { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - } - }, - "ModuleFields": { - "namespace": "k8s.io" - }, - "MetricSetFields": { - "usage": { - "cpu": { - "9": { - "ns": 102272190459 - } - }, - "percpu": {} - } - }, - "Index": "", - "ID": "", - "Namespace": "containerd.cpu", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0, - "Period": 0, - "DisableTimeSeries": false - }, - { - "RootFields": { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - } - }, - "ModuleFields": { - "namespace": "k8s.io" - }, - "MetricSetFields": { - "usage": { - "cpu": { - "8": { - "ns": 104266711568 - } - }, - "percpu": {} - } - }, - "Index": "", - "ID": "", - "Namespace": "containerd.cpu", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0, - "Period": 0, - "DisableTimeSeries": false - }, - { - "RootFields": { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - } - }, - "ModuleFields": { - "namespace": "k8s.io" - }, - "MetricSetFields": { - "usage": { - "cpu": { - "10": { - "ns": 106709905770 - } - }, - "percpu": {} - } - }, - "Index": "", - "ID": "", - "Namespace": "containerd.cpu", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0, - "Period": 0, - "DisableTimeSeries": false - }, - { - "RootFields": { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - } - }, - "ModuleFields": { - "namespace": 
"k8s.io" - }, - "MetricSetFields": { - "usage": { - "cpu": { - "5": { - "ns": 98155683224 - } - }, - "percpu": {} - } - }, - "Index": "", - "ID": "", - "Namespace": "containerd.cpu", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0, - "Period": 0, - "DisableTimeSeries": false - }, - { - "RootFields": { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - } - }, - "ModuleFields": { - "namespace": "k8s.io" - }, - "MetricSetFields": { - "usage": { - "cpu": { - "2": { - "ns": 105305653633 - } - }, - "percpu": {} - } - }, - "Index": "", - "ID": "", - "Namespace": "containerd.cpu", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0, - "Period": 0, - "DisableTimeSeries": false - }, - { - "RootFields": { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - } - }, - "ModuleFields": { - "namespace": "k8s.io" - }, - "MetricSetFields": { - "usage": { - "cpu": { - "3": { - "ns": 101195506344 - } - }, - "percpu": {} - } - }, - "Index": "", - "ID": "", - "Namespace": "containerd.cpu", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0, - "Period": 0, - "DisableTimeSeries": false - }, { "RootFields": { "container": { @@ -253,130 +13,46 @@ "cpu": { "0": { "ns": 99137817570 - } - }, - "percpu": {} - } - }, - "Index": "", - "ID": "", - "Namespace": "containerd.cpu", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0, - "Period": 0, - "DisableTimeSeries": false - }, - { - "RootFields": { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - } - }, - "ModuleFields": { - "namespace": "k8s.io" - }, - "MetricSetFields": { - "usage": { - "cpu": { + }, "1": { "ns": 116475261138 - } - }, - "percpu": {} - } - }, - "Index": "", - "ID": "", - "Namespace": "containerd.cpu", - 
"Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0, - "Period": 0, - "DisableTimeSeries": false - }, - { - "RootFields": { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - } - }, - "ModuleFields": { - "namespace": "k8s.io" - }, - "MetricSetFields": { - "usage": { - "cpu": { + }, + "2": { + "ns": 105305653633 + }, + "3": { + "ns": 101195506344 + }, + "4": { + "ns": 105731762224 + }, + "5": { + "ns": 98155683224 + }, "6": { "ns": 95075348914 - } - }, - "percpu": {} - } - }, - "Index": "", - "ID": "", - "Namespace": "containerd.cpu", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0, - "Period": 0, - "DisableTimeSeries": false - }, - { - "RootFields": { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - } - }, - "ModuleFields": { - "namespace": "k8s.io" - }, - "MetricSetFields": { - "usage": { - "cpu": { + }, + "7": { + "ns": 97134782770 + }, + "8": { + "ns": 104266711568 + }, + "9": { + "ns": 102272190459 + }, + "10": { + "ns": 106709905770 + }, "11": { "ns": 104878380370 } }, - "percpu": {} - } - }, - "Index": "", - "ID": "", - "Namespace": "containerd.cpu", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0, - "Period": 0, - "DisableTimeSeries": false - }, - { - "RootFields": { - "container": { - "cpu": { - "usage": 0 - }, - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - } - }, - "ModuleFields": { - "namespace": "k8s.io" - }, - "MetricSetFields": { - "usage": { "kernel": { "ns": 532180000000, "pct": 0 }, + "percpu": {}, "total": { "ns": 1236339003984, "pct": 0 @@ -398,4 +74,4 @@ "Period": 0, "DisableTimeSeries": false } -] +] \ No newline at end of file diff --git a/x-pack/metricbeat/module/containerd/cpu/_meta/testdata/docs.plain-expected.json 
b/x-pack/metricbeat/module/containerd/cpu/_meta/testdata/docs.plain-expected.json index 18d9662fac3d..571ccd396791 100644 --- a/x-pack/metricbeat/module/containerd/cpu/_meta/testdata/docs.plain-expected.json +++ b/x-pack/metricbeat/module/containerd/cpu/_meta/testdata/docs.plain-expected.json @@ -3,102 +3,6 @@ "container": { "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" }, - "containerd": { - "cpu": { - "usage": { - "cpu": { - "4": { - "ns": 105731762224 - } - }, - "percpu": {} - } - }, - "namespace": "k8s.io" - }, - "event": { - "dataset": "containerd.cpu", - "duration": 115000, - "module": "containerd" - }, - "metricset": { - "name": "cpu", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "containerd" - } - }, - { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - }, - "containerd": { - "cpu": { - "usage": { - "cpu": { - "3": { - "ns": 101195506344 - } - }, - "percpu": {} - } - }, - "namespace": "k8s.io" - }, - "event": { - "dataset": "containerd.cpu", - "duration": 115000, - "module": "containerd" - }, - "metricset": { - "name": "cpu", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "containerd" - } - }, - { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - }, - "containerd": { - "cpu": { - "usage": { - "cpu": { - "0": { - "ns": 99137817570 - } - }, - "percpu": {} - } - }, - "namespace": "k8s.io" - }, - "event": { - "dataset": "containerd.cpu", - "duration": 115000, - "module": "containerd" - }, - "metricset": { - "name": "cpu", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "containerd" - } - }, - { - "container": { - "cpu": { - "usage": 0 - }, - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - }, "containerd": { "cpu": { "usage": { @@ -113,250 +17,41 @@ "user": { "ns": 525470000000, "pct": 0 - } - } - }, - "namespace": "k8s.io" 
- }, - "event": { - "dataset": "containerd.cpu", - "duration": 115000, - "module": "containerd" - }, - "metricset": { - "name": "cpu", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "containerd" - } - }, - { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - }, - "containerd": { - "cpu": { - "usage": { - "cpu": { - "9": { - "ns": 102272190459 - } }, - "percpu": {} - } - }, - "namespace": "k8s.io" - }, - "event": { - "dataset": "containerd.cpu", - "duration": 115000, - "module": "containerd" - }, - "metricset": { - "name": "cpu", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "containerd" - } - }, - { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - }, - "containerd": { - "cpu": { - "usage": { "cpu": { + "0": { + "ns": 99137817570 + }, + "1": { + "ns": 116475261138 + }, "2": { "ns": 105305653633 - } - }, - "percpu": {} - } - }, - "namespace": "k8s.io" - }, - "event": { - "dataset": "containerd.cpu", - "duration": 115000, - "module": "containerd" - }, - "metricset": { - "name": "cpu", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "containerd" - } - }, - { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - }, - "containerd": { - "cpu": { - "usage": { - "cpu": { - "8": { - "ns": 104266711568 - } - }, - "percpu": {} - } - }, - "namespace": "k8s.io" - }, - "event": { - "dataset": "containerd.cpu", - "duration": 115000, - "module": "containerd" - }, - "metricset": { - "name": "cpu", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "containerd" - } - }, - { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - }, - "containerd": { - "cpu": { - "usage": { - "cpu": { - "7": { - "ns": 97134782770 - } - }, - "percpu": {} - } - }, - "namespace": "k8s.io" - }, - "event": { - 
"dataset": "containerd.cpu", - "duration": 115000, - "module": "containerd" - }, - "metricset": { - "name": "cpu", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "containerd" - } - }, - { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - }, - "containerd": { - "cpu": { - "usage": { - "cpu": { + }, + "3": { + "ns": 101195506344 + }, + "4": { + "ns": 105731762224 + }, "5": { "ns": 98155683224 - } - }, - "percpu": {} - } - }, - "namespace": "k8s.io" - }, - "event": { - "dataset": "containerd.cpu", - "duration": 115000, - "module": "containerd" - }, - "metricset": { - "name": "cpu", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "containerd" - } - }, - { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - }, - "containerd": { - "cpu": { - "usage": { - "cpu": { - "1": { - "ns": 116475261138 - } - }, - "percpu": {} - } - }, - "namespace": "k8s.io" - }, - "event": { - "dataset": "containerd.cpu", - "duration": 115000, - "module": "containerd" - }, - "metricset": { - "name": "cpu", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "containerd" - } - }, - { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - }, - "containerd": { - "cpu": { - "usage": { - "cpu": { + }, + "6": { + "ns": 95075348914 + }, + "7": { + "ns": 97134782770 + }, + "8": { + "ns": 104266711568 + }, + "9": { + "ns": 102272190459 + }, "10": { "ns": 106709905770 - } - }, - "percpu": {} - } - }, - "namespace": "k8s.io" - }, - "event": { - "dataset": "containerd.cpu", - "duration": 115000, - "module": "containerd" - }, - "metricset": { - "name": "cpu", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "containerd" - } - }, - { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - }, - "containerd": { - "cpu": { - 
"usage": { - "cpu": { + }, "11": { "ns": 104878380370 } @@ -379,36 +74,5 @@ "address": "127.0.0.1:55555", "type": "containerd" } - }, - { - "container": { - "id": "7434687dbe3684407afa899582f2909203b9dc5537632b512f76798db5c0787d" - }, - "containerd": { - "cpu": { - "usage": { - "cpu": { - "6": { - "ns": 95075348914 - } - }, - "percpu": {} - } - }, - "namespace": "k8s.io" - }, - "event": { - "dataset": "containerd.cpu", - "duration": 115000, - "module": "containerd" - }, - "metricset": { - "name": "cpu", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "containerd" - } } -] +] \ No newline at end of file diff --git a/x-pack/metricbeat/module/containerd/cpu/cpu.go b/x-pack/metricbeat/module/containerd/cpu/cpu.go index 3744f651faa4..ecc238842ef5 100644 --- a/x-pack/metricbeat/module/containerd/cpu/cpu.go +++ b/x-pack/metricbeat/module/containerd/cpu/cpu.go @@ -98,6 +98,45 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { }, nil } +func createDimensionsKey(containerId, namespace string) string { + return fmt.Sprintf("%s-%s", containerId, namespace) +} + +// groupByFields aggregates metrics by their common dimensions into consolidated events. +// It creates a map where each key represents a unique pairing of container ID and namespace, +// ensuring that metrics with identical dimensions are grouped into a single event, +// preventing duplicated documents which would get dropped when TSDB is enabled. 
+func (m *metricset) groupByFields(events []mapstr.M) map[string][]mapstr.M { + groupedMetrics := make(map[string][]mapstr.M, 0) + + for _, event := range events { + containerID, err := event.GetValue("id") + if err != nil { + continue + } + + namespace, err := event.GetValue("namespace") + if err != nil { + continue + } + + containerIDStr, ok := containerID.(string) + if !ok { + continue + } + + namespaceStr, ok := namespace.(string) + if !ok { + continue + } + + dimensionKey := createDimensionsKey(containerIDStr, namespaceStr) + groupedMetrics[dimensionKey] = append(groupedMetrics[dimensionKey], event) + } + + return groupedMetrics +} + // Fetch gathers information from the containerd and reports events with this information. func (m *metricset) Fetch(reporter mb.ReporterV2) error { families, timestamp, err := m.mod.GetContainerdMetricsFamilies(m.prometheusClient) @@ -110,6 +149,9 @@ func (m *metricset) Fetch(reporter mb.ReporterV2) error { return fmt.Errorf("error getting events: %w", err) } + // Group the events by their common dimensions + grouped := m.groupByFields(events) + perContainerCpus := make(map[string]int) if m.calcPct { for _, event := range events { @@ -120,82 +162,97 @@ func (m *metricset) Fetch(reporter mb.ReporterV2) error { } } - for _, event := range events { - // setting ECS container.id and module field containerd.namespace + // Iterate through each group and consolidate them into a single event per group + for _, group := range grouped { + cID := containerd.GetAndDeleteCid(group[0]) + for _, event := range group { + // setting ECS container.id and module field containerd.namespace + containerFields := mapstr.M{} + if m.calcPct { + contCpus, ok := perContainerCpus[cID] + if !ok { + contCpus = 1 + } + // calculate timestamp delta + timestampDelta := int64(0) + if !m.preTimestamp.IsZero() { + timestampDelta = timestamp.UnixNano() - m.preTimestamp.UnixNano() + } + // Calculate cpu total usage percentage + cpuUsageTotal, err := 
event.GetValue("usage.total.ns") + if err == nil { + cpuUsageTotalPct := calcUsagePct(timestampDelta, cpuUsageTotal.(float64), + float64(contCpus), cID, m.preContainerCpuTotalUsage) + m.Logger().Debugf("cpuUsageTotalPct for %+v is %+v", cID, cpuUsageTotalPct) + _, _ = event.Put("usage.total.pct", cpuUsageTotalPct) + // Update container.cpu.usage ECS field + _, _ = containerFields.Put("cpu.usage", cpuUsageTotalPct) + // Update values + m.preContainerCpuTotalUsage[cID], _ = cpuUsageTotal.(float64) + } + + // Calculate cpu kernel usage percentage + // If event does not contain usage.kernel.ns skip the calculation (event has only system.total) + cpuUsageKernel, err := event.GetValue("usage.kernel.ns") + if err == nil { + cpuUsageKernelPct := calcUsagePct(timestampDelta, cpuUsageKernel.(float64), + float64(contCpus), cID, m.preContainerCpuKernelUsage) + m.Logger().Debugf("cpuUsageKernelPct for %+v is %+v", cID, cpuUsageKernelPct) + _, _ = event.Put("usage.kernel.pct", cpuUsageKernelPct) + // Update values + m.preContainerCpuKernelUsage[cID], _ = cpuUsageKernel.(float64) + } + + // Calculate cpu user usage percentage + cpuUsageUser, err := event.GetValue("usage.user.ns") + if err == nil { + cpuUsageUserPct := calcUsagePct(timestampDelta, cpuUsageUser.(float64), + float64(contCpus), cID, m.preContainerCpuUserUsage) + m.Logger().Debugf("cpuUsageUserPct for %+v is %+v", cID, cpuUsageUserPct) + _, _ = event.Put("usage.user.pct", cpuUsageUserPct) + // Update values + m.preContainerCpuUserUsage[cID], _ = cpuUsageUser.(float64) + } + } + if cpuId, err := event.GetValue("cpu"); err == nil { + perCpuNs, err := event.GetValue("usage.percpu.ns") + if err == nil { + key := fmt.Sprintf("usage.cpu.%s.ns", cpuId) + _, _ = event.Put(key, perCpuNs) + _ = event.Delete("cpu") + _ = event.Delete("usage.percpu.ns") + } + } + } + containerFields := mapstr.M{} moduleFields := mapstr.M{} rootFields := mapstr.M{} - cID := containerd.GetAndDeleteCid(event) - namespace := 
containerd.GetAndDeleteNamespace(event) + namespace := containerd.GetAndDeleteNamespace(group[0]) _, _ = containerFields.Put("id", cID) _, _ = rootFields.Put("container", containerFields) _, _ = moduleFields.Put("namespace", namespace) - if m.calcPct { - contCpus, ok := perContainerCpus[cID] - if !ok { - contCpus = 1 - } - // calculate timestamp delta - timestampDelta := int64(0) - if !m.preTimestamp.IsZero() { - timestampDelta = timestamp.UnixNano() - m.preTimestamp.UnixNano() - } - // Calculate cpu total usage percentage - cpuUsageTotal, err := event.GetValue("usage.total.ns") - if err == nil { - cpuUsageTotalPct := calcUsagePct(timestampDelta, cpuUsageTotal.(float64), - float64(contCpus), cID, m.preContainerCpuTotalUsage) - m.Logger().Debugf("cpuUsageTotalPct for %+v is %+v", cID, cpuUsageTotalPct) - _, _ = event.Put("usage.total.pct", cpuUsageTotalPct) - // Update container.cpu.usage ECS field - _, _ = containerFields.Put("cpu.usage", cpuUsageTotalPct) - // Update values - m.preContainerCpuTotalUsage[cID], _ = cpuUsageTotal.(float64) - } + metricsetFields := mapstr.M{} - // Calculate cpu kernel usage percentage - // If event does not contain usage.kernel.ns skip the calculation (event has only system.total) - cpuUsageKernel, err := event.GetValue("usage.kernel.ns") - if err == nil { - cpuUsageKernelPct := calcUsagePct(timestampDelta, cpuUsageKernel.(float64), - float64(contCpus), cID, m.preContainerCpuKernelUsage) - m.Logger().Debugf("cpuUsageKernelPct for %+v is %+v", cID, cpuUsageKernelPct) - _, _ = event.Put("usage.kernel.pct", cpuUsageKernelPct) - // Update values - m.preContainerCpuKernelUsage[cID], _ = cpuUsageKernel.(float64) - } - - // Calculate cpu user usage percentage - cpuUsageUser, err := event.GetValue("usage.user.ns") - if err == nil { - cpuUsageUserPct := calcUsagePct(timestampDelta, cpuUsageUser.(float64), - float64(contCpus), cID, m.preContainerCpuUserUsage) - m.Logger().Debugf("cpuUsageUserPct for %+v is %+v", cID, cpuUsageUserPct) - _, _ = 
event.Put("usage.user.pct", cpuUsageUserPct) - // Update values - m.preContainerCpuUserUsage[cID], _ = cpuUsageUser.(float64) - } - } - if cpuId, err := event.GetValue("cpu"); err == nil { - perCpuNs, err := event.GetValue("usage.percpu.ns") - if err == nil { - key := fmt.Sprintf("usage.cpu.%s.ns", cpuId) - _, _ = event.Put(key, perCpuNs) - _ = event.Delete("cpu") - _ = event.Delete("usage.percpu.ns") - } + for _, event := range group { + metricsetFields.DeepUpdateNoOverwrite(event) } - reporter.Event(mb.Event{ + ev := mb.Event{ RootFields: rootFields, ModuleFields: moduleFields, - MetricSetFields: event, + MetricSetFields: metricsetFields, Namespace: "containerd.cpu", - }) + } + _ = ev.MetricSetFields.Delete("id") + _ = ev.MetricSetFields.Delete("namespace") + + reporter.Event(ev) } + // set Timestamp of previous event m.preTimestamp = timestamp return nil From 95f0f85a3edd45799b9928ee48fe00be49e09431 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Mon, 8 Jan 2024 10:47:15 +0100 Subject: [PATCH 030/129] [Filebeat] Fix id for config map (#37545) * Fix config map --- deploy/kubernetes/filebeat-kubernetes.yaml | 2 +- deploy/kubernetes/filebeat/filebeat-configmap.yaml | 2 +- dev-tools/kubernetes/filebeat/manifest.debug.yaml | 2 +- dev-tools/kubernetes/filebeat/manifest.run.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/deploy/kubernetes/filebeat-kubernetes.yaml b/deploy/kubernetes/filebeat-kubernetes.yaml index 6c365ced4cb9..554f89ec3993 100644 --- a/deploy/kubernetes/filebeat-kubernetes.yaml +++ b/deploy/kubernetes/filebeat-kubernetes.yaml @@ -113,7 +113,7 @@ data: filebeat.yml: |- filebeat.inputs: - type: filestream - id: kubernetes-container-logs-${data.kubernetes.pod.name}-${data.kubernetes.container.id} + id: kubernetes-container-logs paths: - /var/log/containers/*.log parsers: diff --git a/deploy/kubernetes/filebeat/filebeat-configmap.yaml 
b/deploy/kubernetes/filebeat/filebeat-configmap.yaml index 8c2fb6603a48..b5472fa9241b 100644 --- a/deploy/kubernetes/filebeat/filebeat-configmap.yaml +++ b/deploy/kubernetes/filebeat/filebeat-configmap.yaml @@ -9,7 +9,7 @@ data: filebeat.yml: |- filebeat.inputs: - type: filestream - id: kubernetes-container-logs-${data.kubernetes.pod.name}-${data.kubernetes.container.id} + id: kubernetes-container-logs paths: - /var/log/containers/*.log parsers: diff --git a/dev-tools/kubernetes/filebeat/manifest.debug.yaml b/dev-tools/kubernetes/filebeat/manifest.debug.yaml index 36fc03bc559c..97cd538c3eeb 100644 --- a/dev-tools/kubernetes/filebeat/manifest.debug.yaml +++ b/dev-tools/kubernetes/filebeat/manifest.debug.yaml @@ -113,7 +113,7 @@ data: filebeat.yml: |- filebeat.inputs: - type: filestream - id: kubernetes-container-logs-${data.kubernetes.pod.name}-${data.kubernetes.container.id} + id: kubernetes-container-logs paths: - /var/log/containers/*.log parsers: diff --git a/dev-tools/kubernetes/filebeat/manifest.run.yaml b/dev-tools/kubernetes/filebeat/manifest.run.yaml index 2263bdd77e67..b8c9edb75577 100644 --- a/dev-tools/kubernetes/filebeat/manifest.run.yaml +++ b/dev-tools/kubernetes/filebeat/manifest.run.yaml @@ -113,7 +113,7 @@ data: filebeat.yml: |- filebeat.inputs: - type: filestream - id: kubernetes-container-logs-${data.kubernetes.pod.name}-${data.kubernetes.container.id} + id: kubernetes-container-logs paths: - /var/log/containers/*.log parsers: From 32c7343db9e967bc8429b8fb934005ff93e6f637 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 9 Jan 2024 12:30:56 -0500 Subject: [PATCH 031/129] chore: Update snapshot.yml (#37584) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml 
b/testing/environments/snapshot.yml index a52226b47855..554289517c9a 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-ybxdr713-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-8upz6ftd-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-ybxdr713-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-8upz6ftd-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-ybxdr713-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-8upz6ftd-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 635b2861d60814cd65dd368c73bcd7c376a60fef Mon Sep 17 00:00:00 2001 From: Denis Date: Wed, 10 Jan 2024 13:34:44 +0000 Subject: [PATCH 032/129] Add test for handling processing errors while publishing events (#37491) There were some instances of this code producing infinite loops. Now there is a test case for the processing pipeline covering this. 
--- libbeat/publisher/pipeline/client_test.go | 169 ++++++++++++++++++++-- 1 file changed, 153 insertions(+), 16 deletions(-) diff --git a/libbeat/publisher/pipeline/client_test.go b/libbeat/publisher/pipeline/client_test.go index e5fa370e292c..a3f0c822b9e8 100644 --- a/libbeat/publisher/pipeline/client_test.go +++ b/libbeat/publisher/pipeline/client_test.go @@ -19,6 +19,8 @@ package pipeline import ( "context" + "errors" + "io" "sync" "testing" "time" @@ -28,6 +30,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/outputs" + "github.com/elastic/beats/v7/libbeat/processors" "github.com/elastic/beats/v7/libbeat/publisher" "github.com/elastic/beats/v7/libbeat/publisher/processing" "github.com/elastic/beats/v7/libbeat/publisher/queue" @@ -35,26 +38,25 @@ import ( "github.com/elastic/beats/v7/libbeat/tests/resources" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/elastic-agent-libs/monitoring" ) -func TestClient(t *testing.T) { - makePipeline := func(settings Settings, qu queue.Queue) *Pipeline { - p, err := New(beat.Info{}, - Monitors{}, - conf.Namespace{}, - outputs.Group{}, - settings, - ) - if err != nil { - panic(err) - } - // Inject a test queue so the outputController doesn't create one - p.outputController.queue = qu +func makePipeline(t *testing.T, settings Settings, qu queue.Queue) *Pipeline { + p, err := New(beat.Info{}, + Monitors{}, + conf.Namespace{}, + outputs.Group{}, + settings, + ) + require.NoError(t, err) + // Inject a test queue so the outputController doesn't create one + p.outputController.queue = qu - return p - } + return p +} +func TestClient(t *testing.T) { t.Run("client close", func(t *testing.T) { // Note: no asserts. 
If closing fails we have a deadlock, because Publish // would block forever @@ -90,7 +92,7 @@ func TestClient(t *testing.T) { routinesChecker := resources.NewGoroutinesChecker() defer routinesChecker.Check(t) - pipeline := makePipeline(Settings{}, makeTestQueue()) + pipeline := makePipeline(t, Settings{}, makeTestQueue()) defer pipeline.Close() var ctx context.Context @@ -119,6 +121,105 @@ func TestClient(t *testing.T) { }) } }) + + t.Run("no infinite loop when processing fails", func(t *testing.T) { + logp.TestingSetup() + l := logp.L() + + // a small in-memory queue with a very short flush interval + q := memqueue.NewQueue(l, nil, memqueue.Settings{ + Events: 5, + FlushMinEvents: 1, + FlushTimeout: time.Millisecond, + }, 5) + + // model a processor that we're going to make produce errors after + p := &testProcessor{} + ps := testProcessorSupporter{Processor: p} + + // now we create a pipeline that makes sure that all + // events are acked while shutting down + pipeline := makePipeline(t, Settings{ + WaitClose: 100 * time.Millisecond, + WaitCloseMode: WaitOnPipelineClose, + Processors: ps, + }, q) + client, err := pipeline.Connect() + require.NoError(t, err) + defer client.Close() + + // consuming all the published events + var received []beat.Event + done := make(chan struct{}) + go func() { + for { + batch, err := q.Get(2) + if errors.Is(err, io.EOF) { + break + } + assert.NoError(t, err) + if batch == nil { + continue + } + for i := 0; i < batch.Count(); i++ { + e := batch.Entry(i).(publisher.Event) + received = append(received, e.Content) + } + batch.Done() + } + close(done) + }() + + sent := []beat.Event{ + { + Fields: mapstr.M{"number": 1}, + }, + { + Fields: mapstr.M{"number": 2}, + }, + { + Fields: mapstr.M{"number": 3}, + }, + { + Fields: mapstr.M{"number": 4}, + }, + } + + expected := []beat.Event{ + { + Fields: mapstr.M{"number": 1, "test": "value"}, + }, + { + Fields: mapstr.M{"number": 2, "test": "value"}, + }, + // { + // // this event must be 
excluded due to the processor error + // Fields: mapstr.M{"number": 3}, + // }, + { + Fields: mapstr.M{"number": 4, "test": "value"}, + }, + } + + client.PublishAll(sent[:2]) // first 2 + + // this causes our processor to malfunction and produce errors for all events + p.ErrorSwitch() + + client.PublishAll(sent[2:3]) // number 3 + + // back to normal + p.ErrorSwitch() + + client.PublishAll(sent[3:]) // number 4 + + client.Close() + pipeline.Close() + + // waiting for all events to be consumed from the queue + <-done + require.Equal(t, expected, received) + }) } func TestClientWaitClose(t *testing.T) { @@ -258,3 +359,39 @@ func TestMonitoring(t *testing.T) { assert.Equal(t, int64(batchSize), telemetrySnapshot.Ints["output.batch_size"]) assert.Equal(t, int64(numClients), telemetrySnapshot.Ints["output.clients"]) } + +type testProcessor struct{ error bool } + +func (p *testProcessor) String() string { + return "testProcessor" +} +func (p *testProcessor) Run(in *beat.Event) (event *beat.Event, err error) { + if p.error { + return nil, errors.New("test error") + } + _, err = in.Fields.Put("test", "value") + return in, err +} + +func (p *testProcessor) ErrorSwitch() { + p.error = !p.error +} + +type testProcessorSupporter struct { + beat.Processor +} + +// Create a running processor interface based on the given config +func (p testProcessorSupporter) Create(cfg beat.ProcessingConfig, drop bool) (beat.Processor, error) { + return p.Processor, nil +} + +// Processors returns a list of config strings for the given processor, for debug purposes +func (p testProcessorSupporter) Processors() []string { + return []string{p.Processor.String()} +} + +// Close the processor supporter +func (p testProcessorSupporter) Close() error { + return processors.Close(p.Processor) +} From e501145259de3b4aed3614a6c8c576faa66ac9f2 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 10 Jan 2024 12:33:56 -0500 Subject: [PATCH 033/129] chore: Update 
snapshot.yml (#37595) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 554289517c9a..657bf7b6301e 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-8upz6ftd-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-d752tfli-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-8upz6ftd-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-d752tfli-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-8upz6ftd-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-d752tfli-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 0c73852e6429f68fd6a897a30e734d7deff940a3 Mon Sep 17 00:00:00 2001 From: Fae Charlton Date: Wed, 10 Jan 2024 12:42:52 -0500 Subject: [PATCH 034/129] Document the Elasticsearch output's 'preset' field (#37315) --- libbeat/docs/queueconfig.asciidoc | 3 + .../elasticsearch/docs/elasticsearch.asciidoc | 75 ++++++++++++++++++- 2 files changed, 77 insertions(+), 1 deletion(-) diff --git a/libbeat/docs/queueconfig.asciidoc b/libbeat/docs/queueconfig.asciidoc index f4e2d62c6eae..08ece0f752f5 100644 --- a/libbeat/docs/queueconfig.asciidoc +++ 
b/libbeat/docs/queueconfig.asciidoc @@ -61,6 +61,7 @@ queue.mem: You can specify the following options in the `queue.mem` section of the +{beatname_lc}.yml+ config file: [float] +[[queue-mem-events-option]] ===== `events` Number of events the queue can store. This value should be evenly divisible by `flush.min_events` to @@ -69,6 +70,7 @@ avoid sending partial batches to the output. The default value is 3200 events. [float] +[[queue-mem-flush-min-events-option]] ===== `flush.min_events` Minimum number of events required for publishing. If this value is set to 0 or 1, events are @@ -80,6 +82,7 @@ sent by the output. The default value is 1600. [float] +[[queue-mem-flush-timeout-option]] ===== `flush.timeout` Maximum wait time for `flush.min_events` to be fulfilled. If set to 0s, events are available to the diff --git a/libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc b/libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc index 1b84948b2779..046c45a34dc1 100644 --- a/libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc +++ b/libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc @@ -98,6 +98,7 @@ output.elasticsearch: In the previous example, the Elasticsearch nodes are available at `https://10.45.3.2:9220/elasticsearch` and `https://10.45.3.1:9230/elasticsearch`. +[[compression-level-option]] ===== `compression_level` The gzip compression level. Setting this value to `0` disables compression. @@ -114,6 +115,7 @@ Configure escaping of HTML in strings. Set to `true` to enable escaping. The default value is `false`. +[[worker-option]] ===== `worker` The number of workers per configured host publishing events to Elasticsearch. This @@ -659,6 +661,7 @@ The default is 3. endif::[] +[[bulk-max-size-option]] ===== `bulk_max_size` The maximum number of events to bulk in a single Elasticsearch bulk API index request. The default is 1600. @@ -691,6 +694,7 @@ default is `1s`. 
The maximum number of seconds to wait before attempting to connect to Elasticsearch after a network error. The default is `60s`. +[[idle-connection-timeout-option]] ===== `idle_connection_timeout` The maximum amount of time an idle connection will remain idle before closing itself. @@ -701,7 +705,7 @@ The default is 3s. The http request timeout in seconds for the Elasticsearch request. The default is 90. -==== `allow_older_versions` +===== `allow_older_versions` By default, {beatname_uc} expects the Elasticsearch instance to be on the same or newer version to provide optimal experience. We suggest you connect to the same version to make sure all features {beatname_uc} is using are @@ -759,6 +763,75 @@ output.elasticsearch: index: "my-dead-letter-index" ------------------------------------------------------------------------------ +===== `preset` + +The performance preset to apply to the output configuration. + +["source","yaml"] +------------------------------------------------------------------------------ +output.elasticsearch: + hosts: ["http://localhost:9200"] + preset: balanced +------------------------------------------------------------------------------ + +Performance presets apply a set of configuration overrides based on a desired performance goal. If set, a performance preset will override other configuration flags to match the recommended settings for that preset. Valid options are: +* `balanced`: good starting point for general efficiency +* `throughput`: good for high data volumes, may increase cpu and memory requirements +* `scale`: reduces ambient resource use in large low-throughput deployments +* `latency`: minimize the time for fresh data to become visible in Elasticsearch +* `custom`: apply user configuration directly with no overrides + +The default if unspecified is `custom`. + +Presets represent current recommendations based on the intended goal; their effect may change between versions to better suit those goals. 
Currently the presets have the following effects: + +[cols="2,1,1,1,1"] +|=== +|preset |balanced |throughput |scale |latency + +|<> +|1600 +|1600 +|1600 +|50 + +|<> +|1 +|4 +|1 +|1 + +|<> +|3200 +|12800 +|3200 +|4100 + +|<> +|1600 +|1600 +|1600 +|2050 + +|<> +|`10s` +|`5s` +|`20s` +|`1s` + +|<> +|1 +|1 +|1 +|1 + +|<> +|`3s` +|`15s` +|`1s` +|`60s` +|=== + [[es-apis]] ==== Elasticsearch APIs {beatname_uc} will use the `_bulk` API from {es}, the events are sent From fffe228735005439f684e4bc28fdbfbed399b9a4 Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Wed, 10 Jan 2024 16:43:30 -0700 Subject: [PATCH 035/129] update apache/arrow library to latest to fix slice bounds out of range (#37588) --- NOTICE.txt | 2295 +++++++++++++++-- go.mod | 43 +- go.sum | 98 +- x-pack/libbeat/reader/parquet/parquet.go | 8 +- x-pack/libbeat/reader/parquet/parquet_test.go | 10 +- 5 files changed, 2129 insertions(+), 325 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 1f4940889c6a..5bffb7c1b677 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -11,11 +11,11 @@ Third party libraries used by the Elastic Beats project: -------------------------------------------------------------------------------- Dependency : cloud.google.com/go -Version: v0.110.4 +Version: v0.110.8 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go@v0.110.4/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go@v0.110.8/LICENSE: Apache License @@ -223,11 +223,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go@v0.110.4/LICEN -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/bigquery -Version: v1.52.0 +Version: v1.55.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file 
$GOMODCACHE/cloud.google.com/go/bigquery@v1.52.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/bigquery@v1.55.0/LICENSE: Apache License @@ -435,11 +435,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/bigquery@v1.52 -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/compute -Version: v1.21.0 +Version: v1.23.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute@v1.21.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute@v1.23.0/LICENSE: Apache License @@ -647,11 +647,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute@v1.21. -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/monitoring -Version: v1.15.1 +Version: v1.16.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/monitoring@v1.15.1/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/monitoring@v1.16.0/LICENSE: Apache License @@ -859,11 +859,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/monitoring@v1. 
-------------------------------------------------------------------------------- Dependency : cloud.google.com/go/pubsub -Version: v1.32.0 +Version: v1.33.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/pubsub@v1.32.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/pubsub@v1.33.0/LICENSE: Apache License @@ -2969,12 +2969,12 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- -Dependency : github.com/apache/arrow/go/v12 -Version: v12.0.1 +Dependency : github.com/apache/arrow/go/v14 +Version: v14.0.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/apache/arrow/go/v12@v12.0.1/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/apache/arrow/go/v14@v14.0.2/LICENSE.txt: Apache License @@ -4659,17 +4659,6 @@ for PyArrow. Ibis is released under the Apache License, Version 2.0. -------------------------------------------------------------------------------- -This project includes code from the autobrew project. - -* r/tools/autobrew and dev/tasks/homebrew-formulae/autobrew/apache-arrow.rb - are based on code from the autobrew project. - -Copyright (c) 2019, Jeroen Ooms -License: MIT -Homepage: https://github.com/jeroen/autobrew - --------------------------------------------------------------------------------- - dev/tasks/homebrew-formulae/apache-arrow.rb has the following license: BSD 2-Clause License @@ -15845,11 +15834,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : github.com/fatih/color -Version: v1.13.0 +Version: v1.15.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/fatih/color@v1.13.0/LICENSE.md: +Contents of probable licence file $GOMODCACHE/github.com/fatih/color@v1.15.0/LICENSE.md: The MIT License (MIT) @@ -17792,11 +17781,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/google/flatbuffers -Version: v23.3.3+incompatible +Version: v23.5.26+incompatible Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/google/flatbuffers@v23.3.3+incompatible/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/google/flatbuffers@v23.5.26+incompatible/LICENSE: Apache License @@ -18079,11 +18068,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/google/uuid -Version: v1.3.0 +Version: v1.3.1 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/google/uuid@v1.3.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/google/uuid@v1.3.1/LICENSE: Copyright (c) 2009,2014 Google Inc. All rights reserved. @@ -18116,11 +18105,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : github.com/googleapis/gax-go/v2 -Version: v2.11.0 +Version: v2.12.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/googleapis/gax-go/v2@v2.11.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/googleapis/gax-go/v2@v2.12.0/LICENSE: Copyright 2016, Google Inc. All rights reserved. @@ -20635,11 +20624,11 @@ Contents of probable licence file $GOMODCACHE/github.com/magefile/mage@v1.15.0/L -------------------------------------------------------------------------------- Dependency : github.com/mattn/go-colorable -Version: v0.1.12 +Version: v0.1.13 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/mattn/go-colorable@v0.1.12/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/mattn/go-colorable@v0.1.13/LICENSE: The MIT License (MIT) @@ -21209,11 +21198,11 @@ THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/pierrec/lz4/v4 -Version: v4.1.16 +Version: v4.1.18 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/pierrec/lz4/v4@v4.1.16/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/pierrec/lz4/v4@v4.1.18/LICENSE: Copyright (c) 2015, Pierre Curto All rights reserved. @@ -25070,11 +25059,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : google.golang.org/api -Version: v0.126.0 +Version: v0.128.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/api@v0.126.0/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/api@v0.128.0/LICENSE: Copyright (c) 2011 Google Inc. All rights reserved. @@ -25107,11 +25096,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : google.golang.org/genproto/googleapis/api -Version: v0.0.0-20230711160842-782d3b101e98 +Version: v0.0.0-20230913181813-007df8e322eb Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/genproto/googleapis/api@v0.0.0-20230711160842-782d3b101e98/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto/googleapis/api@v0.0.0-20230913181813-007df8e322eb/LICENSE: Apache License @@ -27652,11 +27641,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute/metada -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/datacatalog -Version: v1.14.1 +Version: v1.17.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/datacatalog@v1.14.1/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/datacatalog@v1.17.1/LICENSE: Apache License @@ -27864,11 +27853,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/datacatalog@v1 
-------------------------------------------------------------------------------- Dependency : cloud.google.com/go/iam -Version: v1.1.1 +Version: v1.1.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/iam@v1.1.1/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/iam@v1.1.2/LICENSE: Apache License @@ -28076,11 +28065,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/iam@v1.1.1/LIC -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/kms -Version: v1.12.1 +Version: v1.15.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/kms@v1.12.1/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/kms@v1.15.2/LICENSE: Apache License @@ -31354,12 +31343,12 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- -Dependency : github.com/apache/thrift -Version: v0.19.0 +Dependency : github.com/apache/arrow/go/v12 +Version: v12.0.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/apache/thrift@v0.19.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/apache/arrow/go/v12@v12.0.0/LICENSE.txt: Apache License @@ -31564,129 +31553,269 @@ Contents of probable licence file $GOMODCACHE/github.com/apache/thrift@v0.19.0/L See the License for the specific language governing permissions and limitations under the License. 
--------------------------------------------------- -SOFTWARE DISTRIBUTED WITH THRIFT: +-------------------------------------------------------------------------------- -The Apache Thrift software includes a number of subcomponents with -separate copyright notices and license terms. Your use of the source -code for the these subcomponents is subject to the terms and -conditions of the following licenses. +src/arrow/util (some portions): Apache 2.0, and 3-clause BSD --------------------------------------------------- -Portions of the following files are licensed under the MIT License: +Some portions of this module are derived from code in the Chromium project, +copyright (c) Google inc and (c) The Chromium Authors and licensed under the +Apache 2.0 License or the under the 3-clause BSD license: - lib/erl/src/Makefile.am + Copyright (c) 2013 The Chromium Authors. All rights reserved. -Please see doc/otp-base-license.txt for the full terms of this license. + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: --------------------------------------------------- -For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
-# Copyright (c) 2007 Thomas Porschberg -# -# Copying and distribution of this file, with or without -# modification, are permitted in any medium without royalty provided -# the copyright notice and this notice are preserved. + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------- -For the lib/nodejs/lib/thrift/json_parse.js: +-------------------------------------------------------------------------------- -/* - json_parse.js - 2015-05-02 - Public Domain. - NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +This project includes code from Daniel Lemire's FrameOfReference project. -*/ -(By Douglas Crockford ) +https://github.com/lemire/FrameOfReference/blob/6ccaf9e97160f9a3b299e23a8ef739e711ef0c71/src/bpacking.cpp --------------------------------------------------- -For lib/cpp/src/thrift/windows/SocketPair.cpp +Copyright: 2013 Daniel Lemire +Home page: http://lemire.me/en/ +Project page: https://github.com/lemire/FrameOfReference +License: Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 -/* socketpair.c - * Copyright 2007 by Nathan C. Myers ; some rights reserved. - * This code is Free Software. 
It may be copied freely, in original or - * modified form, subject only to the restrictions that (1) the author is - * relieved from all responsibilities for any use for any purpose, and (2) - * this copyright notice must be retained, unchanged, in its entirety. If - * for any reason the author might be held responsible for any consequences - * of copying or use, license is withheld. - */ +-------------------------------------------------------------------------------- +This project includes code from the TensorFlow project --------------------------------------------------- -For lib/py/compat/win32/stdint.h +Copyright 2015 The TensorFlow Authors. All Rights Reserved. -// ISO C9x compliant stdint.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2008 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. The name of the author may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -/////////////////////////////////////////////////////////////////////////////// +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 --------------------------------------------------- -Codegen template in t_html_generator.h +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. -* Bootstrap v2.0.3 -* -* Copyright 2012 Twitter, Inc -* Licensed under the Apache License v2.0 -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Designed and built with all the love in the world @twitter by @mdo and @fat. +-------------------------------------------------------------------------------- ---------------------------------------------------- -For t_cl_generator.cc +This project includes code from the NumPy project. 
- * Copyright (c) 2008- Patrick Collison - * Copyright (c) 2006- Facebook +https://github.com/numpy/numpy/blob/e1f191c46f2eebd6cb892a4bfe14d9dd43a06c4e/numpy/core/src/multiarray/multiarraymodule.c#L2910 ---------------------------------------------------- +https://github.com/numpy/numpy/blob/68fd82271b9ea5a9e50d4e761061dfcca851382a/numpy/core/src/multiarray/datetime.c + +Copyright (c) 2005-2017, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- -Dependency : github.com/poy/eachers -Version: v0.0.0-20181020210610-23942921fe77 -Licence type (autodetected): MIT + +This project includes code from the Boost project + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/poy/eachers@v0.0.0-20181020210610-23942921fe77/LICENSE.md: +This project includes code from the FlatBuffers project -The MIT License (MIT) +Copyright 2014 Google Inc. 
-Copyright (c) 2016 Andrew Poydence +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the tslib project + +Copyright 2015 Microsoft Corporation. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +This project includes code from the jemalloc project + +https://github.com/jemalloc/jemalloc + +Copyright (C) 2002-2017 Jason Evans . +All rights reserved. 
+Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. +Copyright (C) 2009-2017 Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +1. Redistributions of source code must retain the above copyright notice(s), + this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice(s), + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- + +This project includes code from the Go project, BSD 3-clause license + PATENTS +weak patent termination clause +(https://github.com/golang/go/blob/master/PATENTS). + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project includes code from the hs2client + +https://github.com/cloudera/hs2client + +Copyright 2016 Cloudera Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +-------------------------------------------------------------------------------- + +The script ci/scripts/util_wait_for_it.sh has the following license + +Copyright (c) 2016 Giles Hall + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. @@ -31699,126 +31828,1798 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- -Dependency : github.com/armon/go-radix -Version: v1.0.0 -Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/armon/go-radix@v1.0.0/LICENSE: - -The MIT License (MIT) +The script r/configure has the following license (MIT) -Copyright (c) 2014 Armon Dadgar +Copyright (c) 2017, Jeroen Ooms and Jim Hester Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to 
permit persons to whom the Software is furnished to do +so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go -Version: v1.38.60 -Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go@v1.38.60/LICENSE.txt: +cpp/src/arrow/util/logging.cc, cpp/src/arrow/util/logging.h and +cpp/src/arrow/util/logging-test.cc are adapted from +Ray Project (https://github.com/ray-project/ray) (Apache 2.0). +Copyright (c) 2016 Ray Project (https://github.com/ray-project/ray) - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + http://www.apache.org/licenses/LICENSE-2.0 - 1. Definitions. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. +-------------------------------------------------------------------------------- +The files cpp/src/arrow/vendored/datetime/date.h, cpp/src/arrow/vendored/datetime/tz.h, +cpp/src/arrow/vendored/datetime/tz_private.h, cpp/src/arrow/vendored/datetime/ios.h, +cpp/src/arrow/vendored/datetime/ios.mm, +cpp/src/arrow/vendored/datetime/tz.cpp are adapted from +Howard Hinnant's date library (https://github.com/HowardHinnant/date) +It is licensed under MIT license. - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. +The MIT License (MIT) +Copyright (c) 2015, 2016, 2017 Howard Hinnant +Copyright (c) 2016 Adrian Colomitchi +Copyright (c) 2017 Florian Dang +Copyright (c) 2017 Paul Thompson +Copyright (c) 2018 Tomasz Kamiński - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. +-------------------------------------------------------------------------------- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
+The file cpp/src/arrow/util/utf8.h includes code adapted from the page + https://bjoern.hoehrmann.de/utf-8/decoder/dfa/ +with the following license (MIT) - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. +Copyright (c) 2008-2009 Bjoern Hoehrmann - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) +-------------------------------------------------------------------------------- + +The file cpp/src/arrow/vendored/string_view.hpp has the following license + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. 
IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/xxhash/ have the following license +(BSD 2-Clause License) + +xxHash Library +Copyright (c) 2012-2014, Yann Collet +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +You can contact the author at : +- xxHash homepage: http://www.xxhash.com +- xxHash source repository : https://github.com/Cyan4973/xxHash + +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/double-conversion/ have the following license +(BSD 3-Clause License) + +Copyright 2006-2011, the V8 project authors. All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/uriparser/ have the following license +(BSD 3-Clause License) + +uriparser - RFC 3986 URI parsing library + +Copyright (C) 2007, Weijia Song +Copyright (C) 2007, Sebastian Pipping +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above + copyright notice, this list of conditions and the following + disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + + * Neither the name of the nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- + +The files under dev/tasks/conda-recipes have the following license + +BSD 3-clause license +Copyright (c) 2015-2018, conda-forge +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- + +The files in cpp/src/arrow/vendored/utf8cpp/ have the following license + +Copyright 2006 Nemanja Trifunovic + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +This project includes code from Apache Kudu. + + * cpp/cmake_modules/CompilerInfo.cmake is based on Kudu's cmake_modules/CompilerInfo.cmake + +Copyright: 2016 The Apache Software Foundation. 
+Home page: https://kudu.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Apache Impala (incubating), formerly +Impala. The Impala code and rights were donated to the ASF as part of the +Incubator process after the initial code imports into Apache Parquet. + +Copyright: 2012 Cloudera, Inc. +Copyright: 2016 The Apache Software Foundation. +Home page: http://impala.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Apache Aurora. + +* dev/release/{release,changelog,release-candidate} are based on the scripts from + Apache Aurora + +Copyright: 2016 The Apache Software Foundation. +Home page: https://aurora.apache.org/ +License: http://www.apache.org/licenses/LICENSE-2.0 + +-------------------------------------------------------------------------------- + +This project includes code from the Google styleguide. + +* cpp/build-support/cpplint.py is based on the scripts from the Google styleguide. + +Copyright: 2009 Google Inc. All rights reserved. +Homepage: https://github.com/google/styleguide +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +This project includes code from Snappy. + +* cpp/cmake_modules/{SnappyCMakeLists.txt,SnappyConfig.h} are based on code + from Google's Snappy project. + +Copyright: 2009 Google Inc. All rights reserved. +Homepage: https://github.com/google/snappy +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +This project includes code from the manylinux project. + +* python/manylinux1/scripts/{build_python.sh,python-tag-abi-tag.py, + requirements.txt} are based on code from the manylinux project. 
+ +Copyright: 2016 manylinux +Homepage: https://github.com/pypa/manylinux +License: The MIT License (MIT) + +-------------------------------------------------------------------------------- + +This project includes code from the cymove project: + +* python/pyarrow/includes/common.pxd includes code from the cymove project + +The MIT License (MIT) +Copyright (c) 2019 Omer Ozarslan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. + +-------------------------------------------------------------------------------- + +The projects includes code from the Ursabot project under the dev/archery +directory. + +License: BSD 2-Clause + +Copyright 2019 RStudio, Inc. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +This project include code from CMake. + +* cpp/cmake_modules/FindGTest.cmake is based on code from CMake. + +Copyright: Copyright 2000-2019 Kitware, Inc. and Contributors +Homepage: https://gitlab.kitware.com/cmake/cmake +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +This project include code from mingw-w64. + +* cpp/src/arrow/util/cpu-info.cc has a polyfill for mingw-w64 < 5 + +Copyright (c) 2009 - 2013 by the mingw-w64 project +Homepage: https://mingw-w64.org +License: Zope Public License (ZPL) Version 2.1. + +--------------------------------------------------------------------------------- + +This project include code from Google's Asylo project. 
+ +* cpp/src/arrow/result.h is based on status_or.h + +Copyright (c) Copyright 2017 Asylo authors +Homepage: https://asylo.dev/ +License: Apache 2.0 + +-------------------------------------------------------------------------------- + +This project includes code from Google's protobuf project + +* cpp/src/arrow/result.h ARROW_ASSIGN_OR_RAISE is based off ASSIGN_OR_RETURN + +Copyright 2008 Google Inc. All rights reserved. +Homepage: https://developers.google.com/protocol-buffers/ +License: + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +Code generated by the Protocol Buffer compiler is owned by the owner +of the input file used when generating it. This code is not +standalone and requires a support library to be linked with it. This +support library is itself covered by the above license. + +-------------------------------------------------------------------------------- + +3rdparty dependency LLVM is statically linked in certain binary distributions. +Additionally some sections of source code have been derived from sources in LLVM +and have been clearly labeled as such. LLVM has the following license: + +============================================================================== +LLVM Release License +============================================================================== +University of Illinois/NCSA +Open Source License + +Copyright (c) 2003-2018 University of Illinois at Urbana-Champaign. +All rights reserved. + +Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. 
+ + * Neither the names of the LLVM Team, University of Illinois at + Urbana-Champaign, nor the names of its contributors may be used to + endorse or promote products derived from this Software without specific + prior written permission. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE +SOFTWARE. + +============================================================================== +Copyrights and Licenses for Third Party Software Distributed with LLVM: +============================================================================== +The LLVM software contains code written by third parties. Such software will +have its own individual LICENSE.TXT file in the directory in which it appears. +This file will describe the copyrights, license, and restrictions which apply +to that code. + +The disclaimer of warranty in the University of Illinois Open Source License +applies to all code in the LLVM Distribution, and nothing in any of the +other licenses gives permission to use the names of the LLVM Team or the +University of Illinois to endorse or promote products derived from this +Software. 
+ +The following pieces of software have additional or alternate copyrights, +licenses, and/or restrictions: + +Program Directory +------- --------- +Google Test llvm/utils/unittest/googletest +OpenBSD regex llvm/lib/Support/{reg*, COPYRIGHT.regex} +pyyaml tests llvm/test/YAMLParser/{*.data, LICENSE.TXT} +ARM contributions llvm/lib/Target/ARM/LICENSE.TXT +md5 contributions llvm/lib/Support/MD5.cpp llvm/include/llvm/Support/MD5.h + +-------------------------------------------------------------------------------- + +3rdparty dependency gRPC is statically linked in certain binary +distributions, like the python wheels. gRPC has the following license: + +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +3rdparty dependency Apache Thrift is statically linked in certain binary +distributions, like the python wheels. Apache Thrift has the following license: + +Apache Thrift +Copyright (C) 2006 - 2019, The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +3rdparty dependency Apache ORC is statically linked in certain binary +distributions, like the python wheels. Apache ORC has the following license: + +Apache ORC +Copyright 2013-2019 The Apache Software Foundation + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by Hewlett-Packard: +(c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +-------------------------------------------------------------------------------- + +3rdparty dependency zstd is statically linked in certain binary +distributions, like the python wheels. ZSTD has the following license: + +BSD License + +For Zstandard software + +Copyright (c) 2016-present, Facebook, Inc. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name Facebook nor the names of its contributors may be used to + endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency lz4 is statically linked in certain binary +distributions, like the python wheels. lz4 has the following license: + +LZ4 Library +Copyright (c) 2011-2016, Yann Collet +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency Brotli is statically linked in certain binary +distributions, like the python wheels. Brotli has the following license: + +Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +-------------------------------------------------------------------------------- + +3rdparty dependency snappy is statically linked in certain binary +distributions, like the python wheels. snappy has the following license: + +Copyright 2011, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Google Inc. 
nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=== + +Some of the benchmark data in testdata/ is licensed differently: + + - fireworks.jpeg is Copyright 2013 Steinar H. Gunderson, and + is licensed under the Creative Commons Attribution 3.0 license + (CC-BY-3.0). See https://creativecommons.org/licenses/by/3.0/ + for more information. + + - kppkn.gtb is taken from the Gaviota chess tablebase set, and + is licensed under the MIT License. See + https://sites.google.com/site/gaviotachessengine/Home/endgame-tablebases-1 + for more information. + + - paper-100k.pdf is an excerpt (bytes 92160 to 194560) from the paper + “Combinatorial Modeling of Chromatin Features Quantitatively Predicts DNA + Replication Timing in _Drosophila_” by Federico Comoglio and Renato Paro, + which is licensed under the CC-BY license. See + http://www.ploscompbiol.org/static/license for more ifnormation. + + - alice29.txt, asyoulik.txt, plrabn12.txt and lcet10.txt are from Project + Gutenberg. 
The first three have expired copyrights and are in the public + domain; the latter does not have expired copyright, but is still in the + public domain according to the license information + (http://www.gutenberg.org/ebooks/53). + +-------------------------------------------------------------------------------- + +3rdparty dependency gflags is statically linked in certain binary +distributions, like the python wheels. gflags has the following license: + +Copyright (c) 2006, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +-------------------------------------------------------------------------------- + +3rdparty dependency glog is statically linked in certain binary +distributions, like the python wheels. glog has the following license: + +Copyright (c) 2008, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +A function gettimeofday in utilities.cc is based on + +http://www.google.com/codesearch/p?hl=en#dR3YEbitojA/COPYING&q=GetSystemTimeAsFileTime%20license:bsd + +The license of this code is: + +Copyright (c) 2003-2008, Jouni Malinen and contributors +All Rights Reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name(s) of the above-listed copyright holder(s) nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency re2 is statically linked in certain binary +distributions, like the python wheels. re2 has the following license: + +Copyright (c) 2009 The RE2 Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +3rdparty dependency c-ares is statically linked in certain binary +distributions, like the python wheels. c-ares has the following license: + +# c-ares license + +Copyright (c) 2007 - 2018, Daniel Stenberg with many contributors, see AUTHORS +file. + +Copyright 1998 by the Massachusetts Institute of Technology. 
+ +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, provided that +the above copyright notice appear in all copies and that both that copyright +notice and this permission notice appear in supporting documentation, and that +the name of M.I.T. not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior permission. +M.I.T. makes no representations about the suitability of this software for any +purpose. It is provided "as is" without express or implied warranty. + +-------------------------------------------------------------------------------- + +3rdparty dependency zlib is redistributed as a dynamically linked shared +library in certain binary distributions, like the python wheels. In the future +this will likely change to static linkage. zlib has the following license: + +zlib.h -- interface of the 'zlib' general purpose compression library + version 1.2.11, January 15th, 2017 + + Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. 
+ + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + +-------------------------------------------------------------------------------- + +3rdparty dependency openssl is redistributed as a dynamically linked shared +library in certain binary distributions, like the python wheels. openssl +preceding version 3 has the following license: + + LICENSE ISSUES + ============== + + The OpenSSL toolkit stays under a double license, i.e. both the conditions of + the OpenSSL License and the original SSLeay license apply to the toolkit. + See below for the actual license texts. + + OpenSSL License + --------------- + +/* ==================================================================== + * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. 
+ * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ + + Original SSLeay License + ----------------------- + +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. 
The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +-------------------------------------------------------------------------------- + +This project includes code from the rtools-backports project. + +* ci/scripts/PKGBUILD and ci/scripts/r_windows_build.sh are based on code + from the rtools-backports project. + +Copyright: Copyright (c) 2013 - 2019, Алексей and Jeroen Ooms. +All rights reserved. +Homepage: https://github.com/r-windows/rtools-backports +License: 3-clause BSD + +-------------------------------------------------------------------------------- + +Some code from pandas has been adapted for the pyarrow codebase. pandas is +available under the 3-clause BSD license, which follows: + +pandas license +============== + +Copyright (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team +All rights reserved. + +Copyright (c) 2008-2011 AQR Capital Management, LLC +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the copyright holder nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------------------- + +Some bits from DyND, in particular aspects of the build system, have been +adapted from libdynd and dynd-python under the terms of the BSD 2-clause +license + +The BSD 2-Clause License + + Copyright (C) 2011-12, Dynamic NDArray Developers + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Dynamic NDArray Developers list: + + * Mark Wiebe + * Continuum Analytics + +-------------------------------------------------------------------------------- + +Some source code from Ibis (https://github.com/cloudera/ibis) has been adapted +for PyArrow. Ibis is released under the Apache License, Version 2.0. + +-------------------------------------------------------------------------------- + +This project includes code from the autobrew project. + +* r/tools/autobrew and dev/tasks/homebrew-formulae/autobrew/apache-arrow.rb + are based on code from the autobrew project. + +Copyright (c) 2019, Jeroen Ooms +License: MIT +Homepage: https://github.com/jeroen/autobrew + +-------------------------------------------------------------------------------- + +dev/tasks/homebrew-formulae/apache-arrow.rb has the following license: + +BSD 2-Clause License + +Copyright (c) 2009-present, Homebrew contributors +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---------------------------------------------------------------------- + +cpp/src/arrow/vendored/base64.cpp has the following license + +ZLIB License + +Copyright (C) 2004-2017 René Nyffenegger + +This source code is provided 'as-is', without any express or implied +warranty. In no event will the author be held liable for any damages arising +from the use of this software. + +Permission is granted to anyone to use this software for any purpose, including +commercial applications, and to alter it and redistribute it freely, subject to +the following restrictions: + +1. The origin of this source code must not be misrepresented; you must not + claim that you wrote the original source code. 
If you use this source code + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + +2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original source code. + +3. This notice may not be removed or altered from any source distribution. + +René Nyffenegger rene.nyffenegger@adp-gmbh.ch + +-------------------------------------------------------------------------------- + +The file cpp/src/arrow/vendored/optional.hpp has the following license + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
+ +-------------------------------------------------------------------------------- + +The file cpp/src/arrow/vendored/musl/strptime.c has the following license + +Copyright © 2005-2020 Rich Felker, et al. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/apache/thrift +Version: v0.19.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/apache/thrift@v0.19.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------- +SOFTWARE DISTRIBUTED WITH THRIFT: + +The Apache Thrift software includes a number of subcomponents with +separate copyright notices and license terms. Your use of the source +code for the these subcomponents is subject to the terms and +conditions of the following licenses. + +-------------------------------------------------- +Portions of the following files are licensed under the MIT License: + + lib/erl/src/Makefile.am + +Please see doc/otp-base-license.txt for the full terms of this license. 
+ +-------------------------------------------------- +For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: + +# Copyright (c) 2007 Thomas Porschberg +# +# Copying and distribution of this file, with or without +# modification, are permitted in any medium without royalty provided +# the copyright notice and this notice are preserved. + +-------------------------------------------------- +For the lib/nodejs/lib/thrift/json_parse.js: + +/* + json_parse.js + 2015-05-02 + Public Domain. + NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + +*/ +(By Douglas Crockford ) + +-------------------------------------------------- +For lib/cpp/src/thrift/windows/SocketPair.cpp + +/* socketpair.c + * Copyright 2007 by Nathan C. Myers ; some rights reserved. + * This code is Free Software. It may be copied freely, in original or + * modified form, subject only to the restrictions that (1) the author is + * relieved from all responsibilities for any use for any purpose, and (2) + * this copyright notice must be retained, unchanged, in its entirety. If + * for any reason the author might be held responsible for any consequences + * of copying or use, license is withheld. + */ + + +-------------------------------------------------- +For lib/py/compat/win32/stdint.h + +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2008 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. The name of the author may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + + +-------------------------------------------------- +Codegen template in t_html_generator.h + +* Bootstrap v2.0.3 +* +* Copyright 2012 Twitter, Inc +* Licensed under the Apache License v2.0 +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Designed and built with all the love in the world @twitter by @mdo and @fat. 
+ +--------------------------------------------------- +For t_cl_generator.cc + + * Copyright (c) 2008- Patrick Collison + * Copyright (c) 2006- Facebook + +--------------------------------------------------- + + +-------------------------------------------------------------------------------- +Dependency : github.com/poy/eachers +Version: v0.0.0-20181020210610-23942921fe77 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/poy/eachers@v0.0.0-20181020210610-23942921fe77/LICENSE.md: + +The MIT License (MIT) + +Copyright (c) 2016 Andrew Poydence + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/armon/go-radix +Version: v1.0.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/armon/go-radix@v1.0.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/aws/aws-sdk-go +Version: v1.38.60 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go@v1.38.60/LICENSE.txt: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work @@ -39244,11 +41045,11 @@ Contents of probable licence file $GOMODCACHE/github.com/google/shlex@v0.0.0-201 -------------------------------------------------------------------------------- Dependency : github.com/googleapis/enterprise-certificate-proxy -Version: v0.2.3 +Version: v0.2.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/googleapis/enterprise-certificate-proxy@v0.2.3/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/googleapis/enterprise-certificate-proxy@v0.2.4/LICENSE: Apache License @@ -43470,11 +45271,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/klauspost/compress -Version: v1.16.5 +Version: v1.16.7 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/klauspost/compress@v1.16.5/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/klauspost/compress@v1.16.7/LICENSE: Copyright (c) 2012 The Go Authors. All rights reserved. Copyright (c) 2019 Klaus Post. All rights reserved. 
@@ -43784,11 +45585,11 @@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLI -------------------------------------------------------------------------------- Dependency : github.com/klauspost/cpuid/v2 -Version: v2.0.9 +Version: v2.2.5 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/klauspost/cpuid/v2@v2.0.9/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/klauspost/cpuid/v2@v2.2.5/LICENSE: The MIT License (MIT) @@ -44415,11 +46216,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/mattn/go-isatty -Version: v0.0.17 +Version: v0.0.19 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/mattn/go-isatty@v0.0.17/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/mattn/go-isatty@v0.0.19/LICENSE: Copyright (c) Yasuhiro MATSUMOTO @@ -51398,11 +53199,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : gonum.org/v1/gonum -Version: v0.11.0 +Version: v0.12.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/gonum.org/v1/gonum@v0.11.0/LICENSE: +Contents of probable licence file $GOMODCACHE/gonum.org/v1/gonum@v0.12.0/LICENSE: Copyright ©2013 The Gonum Authors. All rights reserved. 
@@ -51642,11 +53443,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/appengine@v1.6.7 -------------------------------------------------------------------------------- Dependency : google.golang.org/genproto -Version: v0.0.0-20230711160842-782d3b101e98 +Version: v0.0.0-20230920204549-e6e6cdab5c13 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20230711160842-782d3b101e98/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20230920204549-e6e6cdab5c13/LICENSE: Apache License @@ -51854,11 +53655,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0- -------------------------------------------------------------------------------- Dependency : google.golang.org/genproto/googleapis/rpc -Version: v0.0.0-20230711160842-782d3b101e98 +Version: v0.0.0-20231002182017-d307bd883b97 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/genproto/googleapis/rpc@v0.0.0-20230711160842-782d3b101e98/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto/googleapis/rpc@v0.0.0-20231002182017-d307bd883b97/LICENSE: Apache License diff --git a/go.mod b/go.mod index 786e0cdd8b57..5b2c8932f696 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,9 @@ module github.com/elastic/beats/v7 go 1.21 require ( - cloud.google.com/go/bigquery v1.52.0 - cloud.google.com/go/monitoring v1.15.1 - cloud.google.com/go/pubsub v1.32.0 + cloud.google.com/go/bigquery v1.55.0 + cloud.google.com/go/monitoring v1.16.0 + cloud.google.com/go/pubsub v1.33.0 code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee // indirect code.cloudfoundry.org/go-loggregator v7.4.0+incompatible code.cloudfoundry.org/rfc5424 
v0.0.0-20180905210152-236a6d29298a // indirect @@ -81,7 +81,7 @@ require ( github.com/elastic/go-sysinfo v1.11.2 github.com/elastic/go-ucfg v0.8.6 github.com/elastic/gosigar v0.14.2 - github.com/fatih/color v1.13.0 + github.com/fatih/color v1.15.0 github.com/fearful-symmetry/gorapl v0.0.4 github.com/fsnotify/fsevents v0.1.1 github.com/fsnotify/fsnotify v1.5.1 @@ -97,10 +97,10 @@ require ( github.com/golang/mock v1.6.0 github.com/golang/snappy v0.0.4 github.com/gomodule/redigo v1.8.3 - github.com/google/flatbuffers v23.3.3+incompatible + github.com/google/flatbuffers v23.5.26+incompatible github.com/google/go-cmp v0.6.0 github.com/google/gopacket v1.1.19 - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.3.1 github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 github.com/h2non/filetype v1.1.1 github.com/hashicorp/go-multierror v1.1.1 @@ -117,7 +117,7 @@ require ( github.com/jpillora/backoff v1.0.0 // indirect github.com/lib/pq v1.10.3 github.com/magefile/mage v1.15.0 - github.com/mattn/go-colorable v0.1.12 + github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe // indirect github.com/miekg/dns v1.1.42 github.com/mitchellh/gox v1.0.1 @@ -161,8 +161,8 @@ require ( golang.org/x/text v0.14.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.16.0 - google.golang.org/api v0.126.0 - google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/api v0.128.0 + google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13 // indirect google.golang.org/grpc v1.58.3 google.golang.org/protobuf v1.31.0 gopkg.in/inf.v0 v0.9.1 @@ -181,8 +181,8 @@ require ( ) require ( - cloud.google.com/go v0.110.4 - cloud.google.com/go/compute v1.21.0 + cloud.google.com/go v0.110.8 + cloud.google.com/go/compute v1.23.0 cloud.google.com/go/redis v1.13.1 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 @@ -192,7 +192,7 @@ require ( 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 github.com/Azure/go-autorest/autorest/adal v0.9.14 - github.com/apache/arrow/go/v12 v12.0.1 + github.com/apache/arrow/go/v14 v14.0.2 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.33 github.com/aws/aws-sdk-go-v2/service/cloudformation v1.20.4 @@ -209,13 +209,13 @@ require ( github.com/elastic/toutoumomoma v0.0.0-20221026030040-594ef30cb640 github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15 github.com/google/cel-go v0.17.7 - github.com/googleapis/gax-go/v2 v2.11.0 + github.com/googleapis/gax-go/v2 v2.12.0 github.com/gorilla/handlers v1.5.1 github.com/gorilla/mux v1.8.0 github.com/icholy/digest v0.1.22 github.com/lestrrat-go/jwx/v2 v2.0.11 github.com/otiai10/copy v1.12.0 - github.com/pierrec/lz4/v4 v4.1.16 + github.com/pierrec/lz4/v4 v4.1.18 github.com/pkg/xattr v0.4.9 github.com/sergi/go-diff v1.3.1 github.com/shirou/gopsutil/v3 v3.22.10 @@ -224,14 +224,14 @@ require ( go.elastic.co/apm/v2 v2.4.7 go.mongodb.org/mongo-driver v1.5.1 golang.org/x/tools/go/vcs v0.1.0-deprecated - google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 + google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) require ( aqwari.net/xml v0.0.0-20210331023308-d9421b293817 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.1 // indirect + cloud.google.com/go/iam v1.1.2 // indirect cloud.google.com/go/longrunning v0.5.1 // indirect code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect @@ -248,6 +248,7 @@ require ( github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/andybalholm/brotli v1.0.5 // indirect 
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect + github.com/apache/arrow/go/v12 v12.0.0 // indirect github.com/apache/thrift v0.19.0 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go v1.38.60 // indirect @@ -294,7 +295,7 @@ require ( github.com/google/licenseclassifier v0.0.0-20221004142553-c1ed8fcf4bab // indirect github.com/google/s2a-go v0.1.4 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.4 // indirect github.com/googleapis/gnostic v0.5.5 // indirect github.com/gorilla/websocket v1.4.2 // indirect github.com/hashicorp/cronexpr v1.1.0 // indirect @@ -316,8 +317,8 @@ require ( github.com/karrick/godirwalk v1.17.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/klauspost/asmfmt v1.3.2 // indirect - github.com/klauspost/compress v1.16.5 // indirect - github.com/klauspost/cpuid/v2 v2.0.9 // indirect + github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/kortschak/utter v1.5.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/lestrrat-go/blackmagic v1.0.1 // indirect @@ -328,7 +329,7 @@ require ( github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mailru/easyjson v0.7.6 // indirect github.com/markbates/pkger v0.17.1 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect @@ -371,7 +372,7 @@ require ( golang.org/x/term v0.15.0 // indirect golang.org/x/xerrors 
v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.30.0 // indirect k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect diff --git a/go.sum b/go.sum index 1fa1bc366e26..b6d5f3660d59 100644 --- a/go.sum +++ b/go.sum @@ -24,40 +24,40 @@ cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECH cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= -cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= +cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.52.0 h1:JKLNdxI0N+TIUWD6t9KN646X27N5dQWq9dZbbTWZ8hc= -cloud.google.com/go/bigquery v1.52.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/bigquery v1.55.0 h1:hs44Xxov3XLWQiCx2J8lK5U/ihLqnpm4RVVl5fdtLLI= 
+cloud.google.com/go/bigquery v1.55.0/go.mod h1:9Y5I3PN9kQWuid6183JFhOGOW3GcirA5LpsKCUn+2ec= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/bigtable v1.3.0/go.mod h1:z5EyKrPE8OQmeg4h5MNdKvuSnI9CCT49Ki3f23aBzio= -cloud.google.com/go/compute v1.21.0 h1:JNBsyXVoOoNJtTQcnEY5uYpZIbeCTYIeDe0Xh1bySMk= -cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datacatalog v1.14.1 h1:cFPBt8V5V2T3mu/96tc4nhcMB+5cYcpwjBfn79bZDI8= -cloud.google.com/go/datacatalog v1.14.1/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/datacatalog v1.17.1 h1:qGWrlYvWtK+8jD1jhwq5BsGoSr7S4/LOroV7LwXi00g= +cloud.google.com/go/datacatalog v1.17.1/go.mod h1:nCSYFHgtxh2MiEktWIz71s/X+7ds/UT9kp0PC7waCzE= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= -cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= -cloud.google.com/go/kms v1.12.1 h1:xZmZuwy2cwzsocmKDOPu4BL7umg8QXagQx6fKVmf45U= -cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4= +cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/kms v1.15.2 h1:lh6qra6oC4AyWe5fUUUBe/S27k12OHAleOOOw6KakdE= +cloud.google.com/go/kms v1.15.2/go.mod 
h1:3hopT4+7ooWRCjc2DxgnpESFxhIraaI2IpAVUEhbT/w= cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI= cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= -cloud.google.com/go/monitoring v1.15.1 h1:65JhLMd+JiYnXr6j5Z63dUYCuOg770p8a/VC+gil/58= -cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/monitoring v1.16.0 h1:rlndy4K8yknMY9JuGe2aK4SbCh21FXoCdX7SAGHmRgI= +cloud.google.com/go/monitoring v1.16.0/go.mod h1:Ptp15HgAyM1fNICAojDMoNc/wUmn67mLHQfyqbw+poY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.32.0 h1:JOEkgEYBuUTHSyHS4TcqOFuWr+vD6qO/imsFqShUCp4= -cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsub v1.33.0 h1:6SPCPvWav64tj0sVX/+npCBKhUi/UjJehy9op/V3p2g= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= cloud.google.com/go/redis v1.13.1 h1:YrjQnCC7ydk+k30op7DSjSHw1yAYhqYXFcOq1bSXRYA= cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= @@ -254,8 +254,10 @@ github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/arrow/go/v10 v10.0.1/go.mod 
h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= -github.com/apache/arrow/go/v12 v12.0.1 h1:JsR2+hzYYjgSUkBSaahpqCetqZMr76djX80fF/DiJbg= -github.com/apache/arrow/go/v12 v12.0.1/go.mod h1:weuTY7JvTG/HDPtMQxEUp7pU73vkLWMLpY67QwZ/WWw= +github.com/apache/arrow/go/v12 v12.0.0 h1:xtZE63VWl7qLdB0JObIXvvhGjoVNrQ9ciIHG2OK5cmc= +github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= +github.com/apache/arrow/go/v14 v14.0.2 h1:N8OkaJEOfI3mEZt07BIkvo4sC6XDbL+48MBPWO5IONw= +github.com/apache/arrow/go/v14 v14.0.2/go.mod h1:u3fgh3EdgN/YQ8cVQRguVW3R+seMybFg8QBQ5LU+eBY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= @@ -731,8 +733,8 @@ github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQL github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/fearful-symmetry/gomsr v0.0.1 h1:m208RzdTApWVbv8a9kf78rdPLQe+BY9AxRb/nSbHxSA= github.com/fearful-symmetry/gomsr v0.0.1/go.mod h1:Qb/0Y7zwobP7v8Sji+M5mlL4N7Voyz5WaKXXRFPnLio= github.com/fearful-symmetry/gorapl v0.0.4 h1:TMn4fhhtIAd+C3NrAl638oaYlX1vgcKNVVdad53oyiE= @@ -1029,8 +1031,8 @@ github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulN github.com/google/flatbuffers v1.11.0/go.mod 
h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/flatbuffers v23.3.3+incompatible h1:5PJI/WbJkaMTvpGxsHVKG/LurN/KnWXNyGpwSCDgen0= -github.com/google/flatbuffers v23.3.3+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= +github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -1090,14 +1092,15 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.4 h1:uGy6JWR/uMIILU8wbf+OkstIrNiMjGpEIyhx8f6W7s4= +github.com/googleapis/enterprise-certificate-proxy 
v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= -github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= @@ -1320,11 +1323,12 @@ github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8 github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= -github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= -github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 
v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1394,9 +1398,8 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe h1:YioO2TiJyAHWHyCRQCP8jk5IzTqmsbGc5qQPIhHo6xs= github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= @@ -1406,10 +1409,9 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty 
v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= @@ -1604,8 +1606,9 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.16 h1:kQPfno+wyx6C5572ABwV+Uo3pDFzQ7yhyGchSyRda0c= github.com/pierrec/lz4/v4 v4.1.16/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= +github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrre/gotestcover v0.0.0-20160517101806-924dca7d15f0 h1:i5VIxp6QB8oWZ8IkK8zrDgeT6ORGIUeiN+61iETwJbI= github.com/pierrre/gotestcover v0.0.0-20160517101806-924dca7d15f0/go.mod h1:4xpMLz7RBWyB+ElzHu8Llua96TRCB3YwX+l5EP1wmHk= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= @@ -2330,11 +2333,9 @@ golang.org/x/sys 
v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211102192858-4dd72447c267/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2499,8 +2500,9 @@ gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJ gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= -gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= +gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod 
h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= @@ -2531,8 +2533,8 @@ google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBz google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= -google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/api v0.128.0 h1:RjPESny5CnQRn9V6siglged+DZCgfu9l6mO9dkX9VOg= +google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2596,12 +2598,12 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= -google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= -google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 
h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= -google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13 h1:vlzZttNJGVqTsRFU9AmdnrcO1Znh8Ew9kCD//yjigk0= +google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:CCviP9RmpZ1mxVr8MUjCnSiY09IbAXZxhLE6EhHIdPU= +google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb h1:lK0oleSc7IQsUxO3U5TjL9DWlsxpEBemh+zpB7IqhWI= +google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= diff --git a/x-pack/libbeat/reader/parquet/parquet.go b/x-pack/libbeat/reader/parquet/parquet.go index 2d91f778f334..cc8956155961 100644 --- a/x-pack/libbeat/reader/parquet/parquet.go +++ b/x-pack/libbeat/reader/parquet/parquet.go @@ -10,10 +10,10 @@ import ( "fmt" "io" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/apache/arrow/go/v12/parquet" - "github.com/apache/arrow/go/v12/parquet/file" - "github.com/apache/arrow/go/v12/parquet/pqarrow" + "github.com/apache/arrow/go/v14/arrow/memory" + 
"github.com/apache/arrow/go/v14/parquet" + "github.com/apache/arrow/go/v14/parquet/file" + "github.com/apache/arrow/go/v14/parquet/pqarrow" ) // BufferedReader parses parquet inputs from io streams. diff --git a/x-pack/libbeat/reader/parquet/parquet_test.go b/x-pack/libbeat/reader/parquet/parquet_test.go index 1163156e65d3..a4ba04426183 100644 --- a/x-pack/libbeat/reader/parquet/parquet_test.go +++ b/x-pack/libbeat/reader/parquet/parquet_test.go @@ -14,10 +14,10 @@ import ( "path/filepath" "testing" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/apache/arrow/go/v12/parquet/pqarrow" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/parquet/pqarrow" "github.com/stretchr/testify/assert" ) @@ -109,7 +109,7 @@ func createRandomParquet(t testing.TB, fname string, numCols int, numRows int) m // defines a map to store the parquet data for validation data := make(map[string]bool) // creates a new Arrow schema - var fields []arrow.Field + fields := make([]arrow.Field, 0, numCols) for i := 0; i < numCols; i++ { fieldType := arrow.PrimitiveTypes.Int32 field := arrow.Field{Name: fmt.Sprintf("col%d", i), Type: fieldType, Nullable: true} From d2349edeb446705b33662d86f60e7c83ba7654bb Mon Sep 17 00:00:00 2001 From: Robert Blank - Reservix <96527386+robertreservix@users.noreply.github.com> Date: Thu, 11 Jan 2024 19:03:53 +0100 Subject: [PATCH 036/129] Add compatibility with AWS Lambda runtime 'Custom runtime on Amazon Linux 2' (#37400) Bump github.com/aws/aws-lambda-go from v1.13.3 to v1.44.0 --- NOTICE.txt | 4 ++-- go.mod | 2 +- go.sum | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 5bffb7c1b677..d258999b356b 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -4801,11 +4801,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE 
OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/aws/aws-lambda-go -Version: v1.13.3 +Version: v1.44.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-lambda-go@v1.13.3/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-lambda-go@v1.44.0/LICENSE: Apache License diff --git a/go.mod b/go.mod index 5b2c8932f696..a9e019c82923 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/andrewkroh/sys v0.0.0-20151128191922-287798fe3e43 github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77 // indirect github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 - github.com/aws/aws-lambda-go v1.13.3 + github.com/aws/aws-lambda-go v1.44.0 github.com/aws/aws-sdk-go-v2 v1.18.0 github.com/aws/aws-sdk-go-v2/config v1.17.7 github.com/aws/aws-sdk-go-v2/credentials v1.12.20 diff --git a/go.sum b/go.sum index b6d5f3660d59..716e1a1b4117 100644 --- a/go.sum +++ b/go.sum @@ -278,8 +278,9 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-lambda-go v1.13.3 h1:SuCy7H3NLyp+1Mrfp+m80jcbi9KYWAs9/BXwppwRDzY= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-lambda-go v1.44.0 h1:Xp9PANXKsSJ23IhE4ths592uWTCEewswPhSH9qpAuQQ= +github.com/aws/aws-lambda-go v1.44.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= github.com/aws/aws-sdk-go 
v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= From b9e20cb716cef2375b7e1077fe4113b8b528ae31 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 11 Jan 2024 20:50:54 +0100 Subject: [PATCH 037/129] docs: Prepare Changelog for 8.11.4 (#37611) (#37617) * docs: Close changelog for 8.11.4 * Update CHANGELOG.asciidoc * Apply suggestions from code review Co-authored-by: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com> --------- Co-authored-by: Pierre HILBERT Co-authored-by: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com> (cherry picked from commit 09a13155f5cff96181ba1b21f8df9151b6bb78b6) Co-authored-by: Elastic Machine --- CHANGELOG.asciidoc | 26 ++++++++++++++++++++++++++ CHANGELOG.next.asciidoc | 18 +++--------------- libbeat/docs/release.asciidoc | 1 + 3 files changed, 30 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 8291c2aa2a8d..0a9456d31ec7 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -3,6 +3,32 @@ :issue: https://github.com/elastic/beats/issues/ :pull: https://github.com/elastic/beats/pull/ +[[release-notes-8.11.4]] +=== Beats version 8.11.4 +https://github.com/elastic/beats/compare/v8.11.3\...v8.11.4[View commits] + +==== Bugfixes + +*Heartbeat* + +- Added fix for formatting the logs from stateloader properly. {pull}37369[37369] +- Remove duplicated syscall from ARM seccomp profile. {pull}37440[37440] + +*Metricbeat* + +- Nest the `region` and `availability_zone` ECS fields within the cloud field. {pull}37015[37015] +- Fix CPU and memory metrics collection from privileged process on Windows. {issue}17314[17314]{pull}37027[37027] +- Add memory hard limit from container metadata and remove usage percentage in AWS Fargate. 
{pull}37194[37194] +- Ignore parser errors from unsupported metrics types on Prometheus client and continue parsing until EOF is reached. {pull}37383[37383] +- Fix the reference time rounding on Azure Metrics. {issue}37204[37204] {pull}37365[37365] + +==== Added + +*Packetbeat* + +- Bump Windows Npcap version to v1.78. {issue}37300[37300] {pull}37370[37370] + + [[release-notes-8.11.3]] === Beats version 8.11.3 https://github.com/elastic/beats/compare/v8.11.2\...v8.11.3[View commits] diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 3f6c12cec990..0221bdb100a5 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -88,8 +88,6 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Heartbeat* - Fix panics when parsing dereferencing invalid parsed url. {pull}34702[34702] -- Added fix for formatting the logs from stateloader properly. {pull}37369[37369] -- Remove duplicated syscall from arm seccomp profile. {pull}37440[37440] *Metricbeat* @@ -105,19 +103,6 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix EC2 host.cpu.usage {pull}35717[35717] - Add option in SQL module to execute queries for all dbs. {pull}35688[35688] - Add remaining dimensions for azure storage account to make them available for tsdb enablement. {pull}36331[36331] -- Add missing 'TransactionType' dimension for Azure Storage Account. {pull}36413[36413] -- Add log error when statsd server fails to start {pull}36477[36477] -- Fix CassandraConnectionClosures metric configuration {pull}34742[34742] -- Fix event mapping implementation for statsd module {pull}36925[36925] -- The region and availability_zone ecs fields nested within the cloud field. 
{pull}37015[37015] -- Fix CPU and memory metrics collection from privileged process on Windows {issue}17314[17314]{pull}37027[37027] -- Enhanced Azure Metrics metricset with refined grouping logic and resolved duplication issues for TSDB compatibility {pull}36823[36823] -- Fix memory leak on Windows {issue}37142[37142] {pull}37171[37171] -- Fix unintended skip in metric collection on Azure Monitor {issue}37204[37204] {pull}37203[37203] -- Fix the "api-version query parameter (?api-version=) is required for all requests" error in Azure Billing. {pull}37158[37158] -- Add memory hard limit from container metadata and remove usage percentage in AWS Fargate. {pull}37194[37194] -- Ignore parser errors from unsupported metrics types on Prometheus client and continue parsing until EOF is reached {pull}37383[37383] -- Fix the reference time rounding on Azure Metrics {issue}37204[37204] {pull}37365[37365] *Osquerybeat* @@ -337,6 +322,9 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d + + + diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index 3219d84d4007..8034a904ff31 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -8,6 +8,7 @@ This section summarizes the changes in each release. Also read <> for more detail about changes that affect upgrade. 
+* <> * <> * <> * <> From 1d89b7ad856a61c5fe028647620a964a6c42f3dc Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 11 Jan 2024 17:34:50 -0500 Subject: [PATCH 038/129] [updatecli] update elastic stack version for testing 8.13.0-u5089rwg (#37595) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update snapshot.yml Made with ❤️️ by updatecli * chore: Update snapshot.yml Made with ❤️️ by updatecli --------- Co-authored-by: apmmachine Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 657bf7b6301e..5b4d6f9b20f4 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-d752tfli-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-u5089rwg-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-d752tfli-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-u5089rwg-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-d752tfli-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-u5089rwg-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From b37211847c599e9fae9ede124e7cd60bb53b7373 Mon Sep 17 00:00:00 2001 From: apmmachine 
<58790750+apmmachine@users.noreply.github.com> Date: Thu, 11 Jan 2024 21:31:32 -0500 Subject: [PATCH 039/129] [Automation] Bump Golang version to 1.21.6 (#37615) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update NATS module Dockerfile Made with ❤️️ by updatecli * chore: Update Metricbeat debug Dockerfile Made with ❤️️ by updatecli * chore: Update .go-version Made with ❤️️ by updatecli * chore: Update Auditbeat Dockerfile Made with ❤️️ by updatecli * chore: Update version.asciidoc Made with ❤️️ by updatecli * chore: Update Functionbeat Dockerfile Made with ❤️️ by updatecli * chore: Update HTTP module Dockerfile Made with ❤️️ by updatecli * chore: Update stan Dockerfile Made with ❤️️ by updatecli * chore: Update Heartbeat Dockerfile Made with ❤️️ by updatecli * chore: Update Packetbeat Dockerfile Made with ❤️️ by updatecli * chore: Update .devcontainer/devcontainer.json Made with ❤️️ by updatecli * chore: Update from vsphere Dockerfile Made with ❤️️ by updatecli * chore: Update Filebeat debug Dockerfile Made with ❤️️ by updatecli * chore: Update Heartbeat debug Dockerfile Made with ❤️️ by updatecli * chore: Update Metricbeat Dockerfile Made with ❤️️ by updatecli * chore: Update .golangci.yml Made with ❤️️ by updatecli * Update changelog. 
--------- Co-authored-by: apmmachine Co-authored-by: Craig MacKenzie --- .devcontainer/devcontainer.json | 2 +- .go-version | 2 +- .golangci.yml | 8 ++++---- CHANGELOG.next.asciidoc | 2 +- auditbeat/Dockerfile | 2 +- dev-tools/kubernetes/filebeat/Dockerfile.debug | 2 +- dev-tools/kubernetes/heartbeat/Dockerfile.debug | 2 +- dev-tools/kubernetes/metricbeat/Dockerfile.debug | 2 +- heartbeat/Dockerfile | 2 +- libbeat/docs/version.asciidoc | 2 +- metricbeat/Dockerfile | 2 +- metricbeat/module/http/_meta/Dockerfile | 2 +- metricbeat/module/nats/_meta/Dockerfile | 2 +- metricbeat/module/vsphere/_meta/Dockerfile | 2 +- packetbeat/Dockerfile | 2 +- x-pack/functionbeat/Dockerfile | 2 +- x-pack/metricbeat/module/stan/_meta/Dockerfile | 2 +- 17 files changed, 20 insertions(+), 20 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 571faef670aa..7fdf17e76ef7 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -5,7 +5,7 @@ { "name": "Beats Development Container", // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile - "image": "mcr.microsoft.com/devcontainers/go:1-1.20-bullseye", + "image": "mcr.microsoft.com/devcontainers/go:1-1.21-bullseye", // Features to add to the dev container. More info: https://containers.dev/features. "features": { "ghcr.io/devcontainers/features/node:1": {}, diff --git a/.go-version b/.go-version index ce2dd53570bb..c262b1f0dfd4 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.21.5 +1.21.6 diff --git a/.golangci.yml b/.golangci.yml index 9e1b7636436b..03a01e24c4f5 100755 --- a/.golangci.yml +++ b/.golangci.yml @@ -114,7 +114,7 @@ linters-settings: gosimple: # Select the Go version to target. The default is '1.13'. 
- go: "1.21.5" + go: "1.21.6" nakedret: # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 @@ -132,19 +132,19 @@ linters-settings: staticcheck: # Select the Go version to target. The default is '1.13'. - go: "1.21.5" + go: "1.21.6" checks: ["all"] stylecheck: # Select the Go version to target. The default is '1.13'. - go: "1.21.5" + go: "1.21.6" # Disabled: # ST1005: error strings should not be capitalized checks: ["all", "-ST1005"] unused: # Select the Go version to target. The default is '1.13'. - go: "1.21.5" + go: "1.21.6" gosec: excludes: diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 0221bdb100a5..f36c7d7b90e8 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -136,7 +136,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - elasticsearch output now supports `idle_connection_timeout`. {issue}35616[35615] {pull}36843[36843] - Upgrade golang/x/net to v0.17.0. Updates the publicsuffix table used by the registered_domain processor. {pull}36969[36969] Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will disable the netinfo.enabled option of add_host_metadata processor -- Upgrade to Go 1.21.5. {pull}37550[37550] +- Upgrade to Go 1.21.6. {pull}37615[37615] - The Elasticsearch output can now configure performance presets with the `preset` configuration field. {pull}37259[37259] - Upgrade to elastic-agent-libs v0.7.3 and golang.org/x/crypto v0.17.0. 
{pull}37544[37544] diff --git a/auditbeat/Dockerfile b/auditbeat/Dockerfile index a43c12d013d4..59eb4860b405 100644 --- a/auditbeat/Dockerfile +++ b/auditbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.5 +FROM golang:1.21.6 RUN \ apt-get update \ diff --git a/dev-tools/kubernetes/filebeat/Dockerfile.debug b/dev-tools/kubernetes/filebeat/Dockerfile.debug index 8a77046e6657..842da44f3ab2 100644 --- a/dev-tools/kubernetes/filebeat/Dockerfile.debug +++ b/dev-tools/kubernetes/filebeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.21.5 as builder +FROM golang:1.21.6 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/dev-tools/kubernetes/heartbeat/Dockerfile.debug b/dev-tools/kubernetes/heartbeat/Dockerfile.debug index d10e04d0ceaa..fd9970a5b08d 100644 --- a/dev-tools/kubernetes/heartbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/heartbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.21.5 as builder +FROM golang:1.21.6 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/dev-tools/kubernetes/metricbeat/Dockerfile.debug b/dev-tools/kubernetes/metricbeat/Dockerfile.debug index d6a0055f7f6b..00df9d9be1da 100644 --- a/dev-tools/kubernetes/metricbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/metricbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:1.21.5 as builder +FROM golang:1.21.6 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/heartbeat/Dockerfile b/heartbeat/Dockerfile index db338a4089ca..335bf29256ee 100644 --- a/heartbeat/Dockerfile +++ b/heartbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.5 +FROM golang:1.21.6 RUN \ apt-get update \ diff --git a/libbeat/docs/version.asciidoc b/libbeat/docs/version.asciidoc index 098dee31e9df..13456f9cd187 100644 --- a/libbeat/docs/version.asciidoc +++ b/libbeat/docs/version.asciidoc @@ -1,6 +1,6 @@ :stack-version: 8.13.0 :doc-branch: main -:go-version: 1.21.5 
+:go-version: 1.21.6 :release-state: unreleased :python: 3.7 :docker: 1.12 diff --git a/metricbeat/Dockerfile b/metricbeat/Dockerfile index 4ed0762fc4cb..baf372f1859b 100644 --- a/metricbeat/Dockerfile +++ b/metricbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.5 +FROM golang:1.21.6 RUN \ apt update \ diff --git a/metricbeat/module/http/_meta/Dockerfile b/metricbeat/module/http/_meta/Dockerfile index 1b08a63aab50..55df10b1294d 100644 --- a/metricbeat/module/http/_meta/Dockerfile +++ b/metricbeat/module/http/_meta/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.5 +FROM golang:1.21.6 COPY test/main.go main.go diff --git a/metricbeat/module/nats/_meta/Dockerfile b/metricbeat/module/nats/_meta/Dockerfile index b3f2a06d8253..ef6e428ae876 100644 --- a/metricbeat/module/nats/_meta/Dockerfile +++ b/metricbeat/module/nats/_meta/Dockerfile @@ -2,7 +2,7 @@ ARG NATS_VERSION=2.0.4 FROM nats:$NATS_VERSION # build stage -FROM golang:1.21.5 AS build-env +FROM golang:1.21.6 AS build-env RUN apt-get install git mercurial gcc RUN git clone https://github.com/nats-io/nats.go.git /nats-go RUN cd /nats-go/examples/nats-bench && git checkout tags/v1.10.0 && go build . 
diff --git a/metricbeat/module/vsphere/_meta/Dockerfile b/metricbeat/module/vsphere/_meta/Dockerfile index 601d8317ac2c..1f0881c14ecd 100644 --- a/metricbeat/module/vsphere/_meta/Dockerfile +++ b/metricbeat/module/vsphere/_meta/Dockerfile @@ -1,5 +1,5 @@ ARG VSPHERE_GOLANG_VERSION -FROM golang:1.21.5 +FROM golang:1.21.6 RUN apt-get install curl git RUN go install github.com/vmware/govmomi/vcsim@v0.30.4 diff --git a/packetbeat/Dockerfile b/packetbeat/Dockerfile index d3718ecfcdb5..6e5c1d0bab43 100644 --- a/packetbeat/Dockerfile +++ b/packetbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.5 +FROM golang:1.21.6 RUN \ apt-get update \ diff --git a/x-pack/functionbeat/Dockerfile b/x-pack/functionbeat/Dockerfile index 196dd8d5f234..aec1914698c1 100644 --- a/x-pack/functionbeat/Dockerfile +++ b/x-pack/functionbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.5 +FROM golang:1.21.6 RUN \ apt-get update \ diff --git a/x-pack/metricbeat/module/stan/_meta/Dockerfile b/x-pack/metricbeat/module/stan/_meta/Dockerfile index 40162f9181b5..b4da8bf79a36 100644 --- a/x-pack/metricbeat/module/stan/_meta/Dockerfile +++ b/x-pack/metricbeat/module/stan/_meta/Dockerfile @@ -2,7 +2,7 @@ ARG STAN_VERSION=0.15.1 FROM nats-streaming:$STAN_VERSION # build stage -FROM golang:1.21.5 AS build-env +FROM golang:1.21.6 AS build-env RUN apt-get install git mercurial gcc RUN git clone https://github.com/nats-io/stan.go.git /stan-go RUN cd /stan-go/examples/stan-bench && git checkout tags/v0.5.2 && go build . 
From e18d7f83b488194725b40967e52c58e944b36fc2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Fri, 12 Jan 2024 08:09:40 +0100 Subject: [PATCH 040/129] Add support to 1.29 kubernetes version (#37594) Signed-off-by: constanca --- Jenkinsfile | 2 +- deploy/kubernetes/Jenkinsfile.yml | 2 +- metricbeat/docs/modules/kubernetes.asciidoc | 2 +- .../module/kubernetes/_meta/docs.asciidoc | 2 +- .../controllermanager/_meta/data.json | 7 +- .../controllermanager/_meta/test/metrics.1.29 | 2895 +++++++++++++++++ .../_meta/test/metrics.1.29.expected | 2269 +++++++++++++ .../_meta/testdata/docs.plain | 1887 ++++++----- .../_meta/testdata/docs.plain-expected.json | 1079 +++--- .../controllermanager_test.go | 1 + .../kubernetes/proxy/_meta/test/metrics.1.29 | 938 ++++++ .../proxy/_meta/test/metrics.1.29.expected | 380 +++ .../proxy/_meta/testdata/docs.plain | 661 ++-- .../_meta/testdata/docs.plain-expected.json | 382 +-- .../module/kubernetes/proxy/proxy_test.go | 1 + .../scheduler/_meta/test/metrics.1.29 | 1826 +++++++++++ .../_meta/test/metrics.1.29.expected | 857 +++++ .../scheduler/_meta/testdata/docs.plain | 1606 ++++----- .../_meta/testdata/docs.plain-expected.json | 654 ++-- .../kubernetes/scheduler/scheduler_test.go | 1 + 20 files changed, 12408 insertions(+), 3044 deletions(-) create mode 100644 metricbeat/module/kubernetes/controllermanager/_meta/test/metrics.1.29 create mode 100644 metricbeat/module/kubernetes/controllermanager/_meta/test/metrics.1.29.expected create mode 100644 metricbeat/module/kubernetes/proxy/_meta/test/metrics.1.29 create mode 100644 metricbeat/module/kubernetes/proxy/_meta/test/metrics.1.29.expected create mode 100644 metricbeat/module/kubernetes/scheduler/_meta/test/metrics.1.29 create mode 100644 metricbeat/module/kubernetes/scheduler/_meta/test/metrics.1.29.expected diff --git a/Jenkinsfile b/Jenkinsfile index 6709c313f517..9733b7b74c94 100644 --- a/Jenkinsfile +++ 
b/Jenkinsfile @@ -25,7 +25,7 @@ pipeline { TERRAFORM_VERSION = "1.0.2" XPACK_MODULE_PATTERN = '^x-pack\\/[a-z0-9]+beat\\/module\\/([^\\/]+)\\/.*' KIND_VERSION = 'v0.20.0' - K8S_VERSION = 'v1.28.0' + K8S_VERSION = 'v1.29.0' } options { timeout(time: 6, unit: 'HOURS') diff --git a/deploy/kubernetes/Jenkinsfile.yml b/deploy/kubernetes/Jenkinsfile.yml index 95eeffa42de7..4e6c799e0de9 100644 --- a/deploy/kubernetes/Jenkinsfile.yml +++ b/deploy/kubernetes/Jenkinsfile.yml @@ -18,5 +18,5 @@ stages: make check-no-changes; stage: checks k8sTest: - k8sTest: "v1.28.0,v1.27.3,v1.26.6" + k8sTest: "v1.29.0,v1.28.0,v1.27.3,v1.26.6" stage: mandatory diff --git a/metricbeat/docs/modules/kubernetes.asciidoc b/metricbeat/docs/modules/kubernetes.asciidoc index 9ff079faa3be..ceae7f806920 100644 --- a/metricbeat/docs/modules/kubernetes.asciidoc +++ b/metricbeat/docs/modules/kubernetes.asciidoc @@ -157,7 +157,7 @@ roleRef: === Compatibility The Kubernetes module is tested with the following versions of Kubernetes: -1.26.x, 1.27.x and 1.28.x. +1.26.x, 1.27.x, 1.28.x and 1.29.x. [float] === Dashboard diff --git a/metricbeat/module/kubernetes/_meta/docs.asciidoc b/metricbeat/module/kubernetes/_meta/docs.asciidoc index 8b556add326b..90dc31bb9c53 100644 --- a/metricbeat/module/kubernetes/_meta/docs.asciidoc +++ b/metricbeat/module/kubernetes/_meta/docs.asciidoc @@ -146,7 +146,7 @@ roleRef: === Compatibility The Kubernetes module is tested with the following versions of Kubernetes: -1.26.x, 1.27.x and 1.28.x. +1.26.x, 1.27.x, 1.28.x and 1.29.x. 
[float] === Dashboard diff --git a/metricbeat/module/kubernetes/controllermanager/_meta/data.json b/metricbeat/module/kubernetes/controllermanager/_meta/data.json index 4e1e62273432..2a12d8f81b1c 100644 --- a/metricbeat/module/kubernetes/controllermanager/_meta/data.json +++ b/metricbeat/module/kubernetes/controllermanager/_meta/data.json @@ -7,10 +7,10 @@ }, "kubernetes": { "controllermanager": { - "name": "serviceaccount", + "name": "noexec_taint_pod", "workqueue": { "adds": { - "count": 5 + "count": 16 }, "depth": { "count": 0 @@ -18,9 +18,6 @@ "longestrunning": { "sec": 0 }, - "retries": { - "count": 0 - }, "unfinished": { "sec": 0 } diff --git a/metricbeat/module/kubernetes/controllermanager/_meta/test/metrics.1.29 b/metricbeat/module/kubernetes/controllermanager/_meta/test/metrics.1.29 new file mode 100644 index 000000000000..7ef3f5e465b2 --- /dev/null +++ b/metricbeat/module/kubernetes/controllermanager/_meta/test/metrics.1.29 @@ -0,0 +1,2895 @@ +# HELP aggregator_discovery_aggregation_count_total [ALPHA] Counter of number of times discovery was aggregated +# TYPE aggregator_discovery_aggregation_count_total counter +aggregator_discovery_aggregation_count_total 0 +# HELP apiextensions_apiserver_validation_ratcheting_seconds [ALPHA] Time for comparison of old to new for the purposes of CRDValidationRatcheting during an UPDATE in seconds. 
+# TYPE apiextensions_apiserver_validation_ratcheting_seconds histogram +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="1e-05"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="4e-05"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="0.00016"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="0.00064"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="0.00256"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="0.01024"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="0.04096"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="0.16384"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="0.65536"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="2.62144"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="+Inf"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_sum 0 +apiextensions_apiserver_validation_ratcheting_seconds_count 0 +# HELP apiserver_audit_event_total [ALPHA] Counter of audit events generated and sent to the audit backend. +# TYPE apiserver_audit_event_total counter +apiserver_audit_event_total 0 +# HELP apiserver_audit_requests_rejected_total [ALPHA] Counter of apiserver requests rejected due to an error in audit logging backend. +# TYPE apiserver_audit_requests_rejected_total counter +apiserver_audit_requests_rejected_total 0 +# HELP apiserver_cel_compilation_duration_seconds [ALPHA] CEL compilation time in seconds. 
+# TYPE apiserver_cel_compilation_duration_seconds histogram +apiserver_cel_compilation_duration_seconds_bucket{le="0.005"} 0 +apiserver_cel_compilation_duration_seconds_bucket{le="0.01"} 0 +apiserver_cel_compilation_duration_seconds_bucket{le="0.025"} 0 +apiserver_cel_compilation_duration_seconds_bucket{le="0.05"} 0 +apiserver_cel_compilation_duration_seconds_bucket{le="0.1"} 0 +apiserver_cel_compilation_duration_seconds_bucket{le="0.25"} 0 +apiserver_cel_compilation_duration_seconds_bucket{le="0.5"} 0 +apiserver_cel_compilation_duration_seconds_bucket{le="1"} 0 +apiserver_cel_compilation_duration_seconds_bucket{le="2.5"} 0 +apiserver_cel_compilation_duration_seconds_bucket{le="5"} 0 +apiserver_cel_compilation_duration_seconds_bucket{le="10"} 0 +apiserver_cel_compilation_duration_seconds_bucket{le="+Inf"} 0 +apiserver_cel_compilation_duration_seconds_sum 0 +apiserver_cel_compilation_duration_seconds_count 0 +# HELP apiserver_cel_evaluation_duration_seconds [ALPHA] CEL evaluation time in seconds. 
+# TYPE apiserver_cel_evaluation_duration_seconds histogram +apiserver_cel_evaluation_duration_seconds_bucket{le="0.005"} 0 +apiserver_cel_evaluation_duration_seconds_bucket{le="0.01"} 0 +apiserver_cel_evaluation_duration_seconds_bucket{le="0.025"} 0 +apiserver_cel_evaluation_duration_seconds_bucket{le="0.05"} 0 +apiserver_cel_evaluation_duration_seconds_bucket{le="0.1"} 0 +apiserver_cel_evaluation_duration_seconds_bucket{le="0.25"} 0 +apiserver_cel_evaluation_duration_seconds_bucket{le="0.5"} 0 +apiserver_cel_evaluation_duration_seconds_bucket{le="1"} 0 +apiserver_cel_evaluation_duration_seconds_bucket{le="2.5"} 0 +apiserver_cel_evaluation_duration_seconds_bucket{le="5"} 0 +apiserver_cel_evaluation_duration_seconds_bucket{le="10"} 0 +apiserver_cel_evaluation_duration_seconds_bucket{le="+Inf"} 0 +apiserver_cel_evaluation_duration_seconds_sum 0 +apiserver_cel_evaluation_duration_seconds_count 0 +# HELP apiserver_client_certificate_expiration_seconds [ALPHA] Distribution of the remaining lifetime on the certificate used to authenticate a request. 
+# TYPE apiserver_client_certificate_expiration_seconds histogram +apiserver_client_certificate_expiration_seconds_bucket{le="0"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="1800"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="3600"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="7200"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="21600"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="43200"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="86400"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="172800"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="345600"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="604800"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="2.592e+06"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="7.776e+06"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="1.5552e+07"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="3.1104e+07"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="+Inf"} 0 +apiserver_client_certificate_expiration_seconds_sum 0 +apiserver_client_certificate_expiration_seconds_count 0 +# HELP apiserver_delegated_authn_request_duration_seconds [ALPHA] Request latency in seconds. Broken down by status code. 
+# TYPE apiserver_delegated_authn_request_duration_seconds histogram +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="0.25"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="0.5"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="0.7"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="1"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="1.5"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="3"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="5"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="10"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="+Inf"} 1 +apiserver_delegated_authn_request_duration_seconds_sum{code="201"} 0.015306199 +apiserver_delegated_authn_request_duration_seconds_count{code="201"} 1 +# HELP apiserver_delegated_authn_request_total [ALPHA] Number of HTTP requests partitioned by status code. +# TYPE apiserver_delegated_authn_request_total counter +apiserver_delegated_authn_request_total{code="201"} 1 +# HELP apiserver_delegated_authz_request_duration_seconds [ALPHA] Request latency in seconds. Broken down by status code. 
+# TYPE apiserver_delegated_authz_request_duration_seconds histogram +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="0.25"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="0.5"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="0.7"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="1"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="1.5"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="3"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="5"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="10"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="+Inf"} 1 +apiserver_delegated_authz_request_duration_seconds_sum{code="201"} 0.003696981 +apiserver_delegated_authz_request_duration_seconds_count{code="201"} 1 +# HELP apiserver_delegated_authz_request_total [ALPHA] Number of HTTP requests partitioned by status code. +# TYPE apiserver_delegated_authz_request_total counter +apiserver_delegated_authz_request_total{code="201"} 1 +# HELP apiserver_envelope_encryption_dek_cache_fill_percent [ALPHA] Percent of the cache slots currently occupied by cached DEKs. +# TYPE apiserver_envelope_encryption_dek_cache_fill_percent gauge +apiserver_envelope_encryption_dek_cache_fill_percent 0 +# HELP apiserver_storage_data_key_generation_duration_seconds [ALPHA] Latencies in seconds of data encryption key(DEK) generation operations. 
+# TYPE apiserver_storage_data_key_generation_duration_seconds histogram +apiserver_storage_data_key_generation_duration_seconds_bucket{le="5e-06"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="1e-05"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="2e-05"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="4e-05"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="8e-05"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00016"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00032"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00064"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00128"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00256"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00512"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.01024"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.02048"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.04096"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="+Inf"} 0 +apiserver_storage_data_key_generation_duration_seconds_sum 0 +apiserver_storage_data_key_generation_duration_seconds_count 0 +# HELP apiserver_storage_data_key_generation_failures_total [ALPHA] Total number of failed data encryption key(DEK) generation operations. +# TYPE apiserver_storage_data_key_generation_failures_total counter +apiserver_storage_data_key_generation_failures_total 0 +# HELP apiserver_storage_envelope_transformation_cache_misses_total [ALPHA] Total number of cache misses while accessing key decryption key(KEK). 
+# TYPE apiserver_storage_envelope_transformation_cache_misses_total counter +apiserver_storage_envelope_transformation_cache_misses_total 0 +# HELP apiserver_webhooks_x509_insecure_sha1_total [ALPHA] Counts the number of requests to servers with insecure SHA1 signatures in their serving certificate OR the number of connection failures due to the insecure SHA1 signatures (either/or, based on the runtime environment) +# TYPE apiserver_webhooks_x509_insecure_sha1_total counter +apiserver_webhooks_x509_insecure_sha1_total 0 +# HELP apiserver_webhooks_x509_missing_san_total [ALPHA] Counts the number of requests to servers missing SAN extension in their serving certificate OR the number of connection failures due to the lack of x509 certificate SAN extension missing (either/or, based on the runtime environment) +# TYPE apiserver_webhooks_x509_missing_san_total counter +apiserver_webhooks_x509_missing_san_total 0 +# HELP authenticated_user_requests [ALPHA] Counter of authenticated requests broken out by username. +# TYPE authenticated_user_requests counter +authenticated_user_requests{username="other"} 221 +# HELP authentication_attempts [ALPHA] Counter of authenticated attempts. +# TYPE authentication_attempts counter +authentication_attempts{result="success"} 221 +# HELP authentication_duration_seconds [ALPHA] Authentication duration in seconds broken out by result. 
+# TYPE authentication_duration_seconds histogram +authentication_duration_seconds_bucket{result="success",le="0.001"} 221 +authentication_duration_seconds_bucket{result="success",le="0.002"} 221 +authentication_duration_seconds_bucket{result="success",le="0.004"} 221 +authentication_duration_seconds_bucket{result="success",le="0.008"} 221 +authentication_duration_seconds_bucket{result="success",le="0.016"} 221 +authentication_duration_seconds_bucket{result="success",le="0.032"} 221 +authentication_duration_seconds_bucket{result="success",le="0.064"} 221 +authentication_duration_seconds_bucket{result="success",le="0.128"} 221 +authentication_duration_seconds_bucket{result="success",le="0.256"} 221 +authentication_duration_seconds_bucket{result="success",le="0.512"} 221 +authentication_duration_seconds_bucket{result="success",le="1.024"} 221 +authentication_duration_seconds_bucket{result="success",le="2.048"} 221 +authentication_duration_seconds_bucket{result="success",le="4.096"} 221 +authentication_duration_seconds_bucket{result="success",le="8.192"} 221 +authentication_duration_seconds_bucket{result="success",le="16.384"} 221 +authentication_duration_seconds_bucket{result="success",le="+Inf"} 221 +authentication_duration_seconds_sum{result="success"} 0.010239524999999996 +authentication_duration_seconds_count{result="success"} 221 +# HELP authentication_token_cache_active_fetch_count [ALPHA] +# TYPE authentication_token_cache_active_fetch_count gauge +authentication_token_cache_active_fetch_count{status="blocked"} 0 +authentication_token_cache_active_fetch_count{status="in_flight"} 0 +# HELP authentication_token_cache_fetch_total [ALPHA] +# TYPE authentication_token_cache_fetch_total counter +authentication_token_cache_fetch_total{status="ok"} 1 +# HELP authentication_token_cache_request_duration_seconds [ALPHA] +# TYPE authentication_token_cache_request_duration_seconds histogram 
+authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.005"} 0 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.01"} 0 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.025"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.05"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.1"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.25"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.5"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="1"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="2.5"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="5"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="10"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="+Inf"} 1 +authentication_token_cache_request_duration_seconds_sum{status="miss"} 0.015 +authentication_token_cache_request_duration_seconds_count{status="miss"} 1 +# HELP authentication_token_cache_request_total [ALPHA] +# TYPE authentication_token_cache_request_total counter +authentication_token_cache_request_total{status="miss"} 1 +# HELP authorization_attempts_total [ALPHA] Counter of authorization attempts broken down by result. It can be either 'allowed', 'denied', 'no-opinion' or 'error'. +# TYPE authorization_attempts_total counter +authorization_attempts_total{result="allowed"} 221 +# HELP authorization_duration_seconds [ALPHA] Authorization duration in seconds broken out by result. 
+# TYPE authorization_duration_seconds histogram +authorization_duration_seconds_bucket{result="allowed",le="0.001"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.002"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.004"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.008"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.016"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.032"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.064"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.128"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.256"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.512"} 221 +authorization_duration_seconds_bucket{result="allowed",le="1.024"} 221 +authorization_duration_seconds_bucket{result="allowed",le="2.048"} 221 +authorization_duration_seconds_bucket{result="allowed",le="4.096"} 221 +authorization_duration_seconds_bucket{result="allowed",le="8.192"} 221 +authorization_duration_seconds_bucket{result="allowed",le="16.384"} 221 +authorization_duration_seconds_bucket{result="allowed",le="+Inf"} 221 +authorization_duration_seconds_sum{result="allowed"} 0.001831606000000001 +authorization_duration_seconds_count{result="allowed"} 221 +# HELP cardinality_enforcement_unexpected_categorizations_total [ALPHA] The count of unexpected categorizations during cardinality enforcement. 
+# TYPE cardinality_enforcement_unexpected_categorizations_total counter +cardinality_enforcement_unexpected_categorizations_total 0 +# HELP cronjob_controller_job_creation_skew_duration_seconds [STABLE] Time between when a cronjob is scheduled to be run, and when the corresponding job is created +# TYPE cronjob_controller_job_creation_skew_duration_seconds histogram +cronjob_controller_job_creation_skew_duration_seconds_bucket{le="1"} 0 +cronjob_controller_job_creation_skew_duration_seconds_bucket{le="2"} 0 +cronjob_controller_job_creation_skew_duration_seconds_bucket{le="4"} 0 +cronjob_controller_job_creation_skew_duration_seconds_bucket{le="8"} 0 +cronjob_controller_job_creation_skew_duration_seconds_bucket{le="16"} 0 +cronjob_controller_job_creation_skew_duration_seconds_bucket{le="32"} 0 +cronjob_controller_job_creation_skew_duration_seconds_bucket{le="64"} 0 +cronjob_controller_job_creation_skew_duration_seconds_bucket{le="128"} 0 +cronjob_controller_job_creation_skew_duration_seconds_bucket{le="256"} 0 +cronjob_controller_job_creation_skew_duration_seconds_bucket{le="512"} 0 +cronjob_controller_job_creation_skew_duration_seconds_bucket{le="+Inf"} 0 +cronjob_controller_job_creation_skew_duration_seconds_sum 0 +cronjob_controller_job_creation_skew_duration_seconds_count 0 +# HELP disabled_metrics_total [BETA] The count of disabled metrics. 
+# TYPE disabled_metrics_total counter +disabled_metrics_total 0 +# HELP endpoint_slice_controller_changes [ALPHA] Number of EndpointSlice changes +# TYPE endpoint_slice_controller_changes counter +endpoint_slice_controller_changes{operation="create"} 1 +endpoint_slice_controller_changes{operation="update"} 2 +# HELP endpoint_slice_controller_desired_endpoint_slices [ALPHA] Number of EndpointSlices that would exist with perfect endpoint allocation +# TYPE endpoint_slice_controller_desired_endpoint_slices gauge +endpoint_slice_controller_desired_endpoint_slices 1 +# HELP endpoint_slice_controller_endpoints_added_per_sync [ALPHA] Number of endpoints added on each Service sync +# TYPE endpoint_slice_controller_endpoints_added_per_sync histogram +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="2"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="4"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="8"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="16"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="32"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="64"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="128"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="256"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="512"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="1024"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="2048"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="4096"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="8192"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="16384"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="32768"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="+Inf"} 6 +endpoint_slice_controller_endpoints_added_per_sync_sum 2 
+endpoint_slice_controller_endpoints_added_per_sync_count 6 +# HELP endpoint_slice_controller_endpoints_desired [ALPHA] Number of endpoints desired +# TYPE endpoint_slice_controller_endpoints_desired gauge +endpoint_slice_controller_endpoints_desired 2 +# HELP endpoint_slice_controller_endpoints_removed_per_sync [ALPHA] Number of endpoints removed on each Service sync +# TYPE endpoint_slice_controller_endpoints_removed_per_sync histogram +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="2"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="4"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="8"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="16"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="32"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="64"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="128"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="256"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="512"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="1024"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="2048"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="4096"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="8192"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="16384"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="32768"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="+Inf"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_sum 0 +endpoint_slice_controller_endpoints_removed_per_sync_count 6 +# HELP endpoint_slice_controller_endpointslices_changed_per_sync [ALPHA] Number of EndpointSlices changed on each Service sync +# TYPE endpoint_slice_controller_endpointslices_changed_per_sync histogram 
+endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.005"} 3 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.01"} 3 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.025"} 3 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.05"} 3 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.1"} 3 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.25"} 3 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.5"} 3 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="1"} 6 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="2.5"} 6 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="5"} 6 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="10"} 6 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="+Inf"} 6 +endpoint_slice_controller_endpointslices_changed_per_sync_sum{topology="Disabled"} 3 +endpoint_slice_controller_endpointslices_changed_per_sync_count{topology="Disabled"} 6 +# HELP endpoint_slice_controller_num_endpoint_slices [ALPHA] Number of EndpointSlices +# TYPE endpoint_slice_controller_num_endpoint_slices gauge +endpoint_slice_controller_num_endpoint_slices 1 +# HELP endpoint_slice_controller_syncs [ALPHA] Number of EndpointSlice syncs +# TYPE endpoint_slice_controller_syncs counter +endpoint_slice_controller_syncs{result="success"} 7 +# HELP endpoint_slice_mirroring_controller_endpoints_sync_duration [ALPHA] Duration of syncEndpoints() in seconds +# TYPE endpoint_slice_mirroring_controller_endpoints_sync_duration histogram 
+endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.001"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.002"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.004"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.008"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.016"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.032"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.064"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.128"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.256"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.512"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="1.024"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="2.048"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="4.096"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="8.192"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="16.384"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="+Inf"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_sum 0 +endpoint_slice_mirroring_controller_endpoints_sync_duration_count 4 +# HELP ephemeral_volume_controller_create_failures_total [ALPHA] Number of PersistenVolumeClaims creation requests +# TYPE ephemeral_volume_controller_create_failures_total counter +ephemeral_volume_controller_create_failures_total 0 +# HELP ephemeral_volume_controller_create_total [ALPHA] Number of PersistenVolumeClaims creation requests +# TYPE ephemeral_volume_controller_create_total counter +ephemeral_volume_controller_create_total 0 +# HELP garbagecollector_controller_resources_sync_error_total [ALPHA] Number of garbage collector resources 
sync errors +# TYPE garbagecollector_controller_resources_sync_error_total counter +garbagecollector_controller_resources_sync_error_total 0 +# HELP go_cgo_go_to_c_calls_calls_total Count of calls made from Go to C by the current process. +# TYPE go_cgo_go_to_c_calls_calls_total counter +go_cgo_go_to_c_calls_calls_total 0 +# HELP go_cpu_classes_gc_mark_assist_cpu_seconds_total Estimated total CPU time goroutines spent performing GC tasks to assist the GC and prevent it from falling behind the application. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_gc_mark_assist_cpu_seconds_total counter +go_cpu_classes_gc_mark_assist_cpu_seconds_total 0.009980877 +# HELP go_cpu_classes_gc_mark_dedicated_cpu_seconds_total Estimated total CPU time spent performing GC tasks on processors (as defined by GOMAXPROCS) dedicated to those tasks. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_gc_mark_dedicated_cpu_seconds_total counter +go_cpu_classes_gc_mark_dedicated_cpu_seconds_total 0.615466956 +# HELP go_cpu_classes_gc_mark_idle_cpu_seconds_total Estimated total CPU time spent performing GC tasks on spare CPU resources that the Go scheduler could not otherwise find a use for. This should be subtracted from the total GC CPU time to obtain a measure of compulsory GC CPU time. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_gc_mark_idle_cpu_seconds_total counter +go_cpu_classes_gc_mark_idle_cpu_seconds_total 0.10748798 +# HELP go_cpu_classes_gc_pause_cpu_seconds_total Estimated total CPU time spent with the application paused by the GC. 
Even if only one thread is running during the pause, this is computed as GOMAXPROCS times the pause latency because nothing else can be executing. This is the exact sum of samples in /gc/pause:seconds if each sample is multiplied by GOMAXPROCS at the time it is taken. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_gc_pause_cpu_seconds_total counter +go_cpu_classes_gc_pause_cpu_seconds_total 0.086410944 +# HELP go_cpu_classes_gc_total_cpu_seconds_total Estimated total CPU time spent performing GC tasks. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. Sum of all metrics in /cpu/classes/gc. +# TYPE go_cpu_classes_gc_total_cpu_seconds_total counter +go_cpu_classes_gc_total_cpu_seconds_total 0.819346757 +# HELP go_cpu_classes_idle_cpu_seconds_total Estimated total available CPU time not spent executing any Go or Go runtime code. In other words, the part of /cpu/classes/total:cpu-seconds that was unused. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_idle_cpu_seconds_total counter +go_cpu_classes_idle_cpu_seconds_total 34257.660255826 +# HELP go_cpu_classes_scavenge_assist_cpu_seconds_total Estimated total CPU time spent returning unused memory to the underlying platform in response eagerly in response to memory pressure. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_scavenge_assist_cpu_seconds_total counter +go_cpu_classes_scavenge_assist_cpu_seconds_total 4.56e-07 +# HELP go_cpu_classes_scavenge_background_cpu_seconds_total Estimated total CPU time spent performing background tasks to return unused memory to the underlying platform. 
This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_scavenge_background_cpu_seconds_total counter +go_cpu_classes_scavenge_background_cpu_seconds_total 0.001950821 +# HELP go_cpu_classes_scavenge_total_cpu_seconds_total Estimated total CPU time spent performing tasks that return unused memory to the underlying platform. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. Sum of all metrics in /cpu/classes/scavenge. +# TYPE go_cpu_classes_scavenge_total_cpu_seconds_total counter +go_cpu_classes_scavenge_total_cpu_seconds_total 0.001951277 +# HELP go_cpu_classes_total_cpu_seconds_total Estimated total available CPU time for user Go code or the Go runtime, as defined by GOMAXPROCS. In other words, GOMAXPROCS integrated over the wall-clock duration this process has been executing for. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. Sum of all metrics in /cpu/classes. +# TYPE go_cpu_classes_total_cpu_seconds_total counter +go_cpu_classes_total_cpu_seconds_total 34309.383945808 +# HELP go_cpu_classes_user_cpu_seconds_total Estimated total CPU time spent running user Go code. This may also include some small amount of time spent in the Go runtime. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_user_cpu_seconds_total counter +go_cpu_classes_user_cpu_seconds_total 50.902391948 +# HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime. +# TYPE go_gc_cycles_automatic_gc_cycles_total counter +go_gc_cycles_automatic_gc_cycles_total 29 +# HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application. 
+# TYPE go_gc_cycles_forced_gc_cycles_total counter +go_gc_cycles_forced_gc_cycles_total 0 +# HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles. +# TYPE go_gc_cycles_total_gc_cycles_total counter +go_gc_cycles_total_gc_cycles_total 29 +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 7.1885e-05 +go_gc_duration_seconds{quantile="0.25"} 0.00010273 +go_gc_duration_seconds{quantile="0.5"} 0.000147269 +go_gc_duration_seconds{quantile="0.75"} 0.000227305 +go_gc_duration_seconds{quantile="1"} 0.000506175 +go_gc_duration_seconds_sum 0.005400684 +go_gc_duration_seconds_count 29 +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. +# TYPE go_gc_gogc_percent gauge +go_gc_gogc_percent 100 +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. +# TYPE go_gc_gomemlimit_bytes gauge +go_gc_gomemlimit_bytes 9.223372036854776e+18 +# HELP go_gc_heap_allocs_by_size_bytes Distribution of heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
+# TYPE go_gc_heap_allocs_by_size_bytes histogram +go_gc_heap_allocs_by_size_bytes_bucket{le="8.999999999999998"} 20262 +go_gc_heap_allocs_by_size_bytes_bucket{le="24.999999999999996"} 662760 +go_gc_heap_allocs_by_size_bytes_bucket{le="64.99999999999999"} 1.23019e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="144.99999999999997"} 1.564055e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="320.99999999999994"} 1.67643e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="704.9999999999999"} 1.73359e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="1536.9999999999998"} 1.744467e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="3200.9999999999995"} 1.749584e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="6528.999999999999"} 1.752437e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="13568.999999999998"} 1.754058e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="27264.999999999996"} 1.754576e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="+Inf"} 1.754729e+06 +go_gc_heap_allocs_by_size_bytes_sum 1.95122176e+08 +go_gc_heap_allocs_by_size_bytes_count 1.754729e+06 +# HELP go_gc_heap_allocs_bytes_total Cumulative sum of memory allocated to the heap by the application. +# TYPE go_gc_heap_allocs_bytes_total counter +go_gc_heap_allocs_bytes_total 1.95122176e+08 +# HELP go_gc_heap_allocs_objects_total Cumulative count of heap allocations triggered by the application. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. +# TYPE go_gc_heap_allocs_objects_total counter +go_gc_heap_allocs_objects_total 1.754729e+06 +# HELP go_gc_heap_frees_by_size_bytes Distribution of freed heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
+# TYPE go_gc_heap_frees_by_size_bytes histogram +go_gc_heap_frees_by_size_bytes_bucket{le="8.999999999999998"} 16105 +go_gc_heap_frees_by_size_bytes_bucket{le="24.999999999999996"} 609148 +go_gc_heap_frees_by_size_bytes_bucket{le="64.99999999999999"} 1.131067e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="144.99999999999997"} 1.44066e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="320.99999999999994"} 1.543796e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="704.9999999999999"} 1.596558e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="1536.9999999999998"} 1.606603e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="3200.9999999999995"} 1.611257e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="6528.999999999999"} 1.613916e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="13568.999999999998"} 1.615381e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="27264.999999999996"} 1.615713e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="+Inf"} 1.615775e+06 +go_gc_heap_frees_by_size_bytes_sum 1.73551056e+08 +go_gc_heap_frees_by_size_bytes_count 1.615775e+06 +# HELP go_gc_heap_frees_bytes_total Cumulative sum of heap memory freed by the garbage collector. +# TYPE go_gc_heap_frees_bytes_total counter +go_gc_heap_frees_bytes_total 1.73551056e+08 +# HELP go_gc_heap_frees_objects_total Cumulative count of heap allocations whose storage was freed by the garbage collector. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. +# TYPE go_gc_heap_frees_objects_total counter +go_gc_heap_frees_objects_total 1.615775e+06 +# HELP go_gc_heap_goal_bytes Heap size target for the end of the GC cycle. +# TYPE go_gc_heap_goal_bytes gauge +go_gc_heap_goal_bytes 3.468156e+07 +# HELP go_gc_heap_live_bytes Heap memory occupied by live objects that were marked by the previous GC. +# TYPE go_gc_heap_live_bytes gauge +go_gc_heap_live_bytes 1.6361688e+07 +# HELP go_gc_heap_objects_objects Number of objects, live or unswept, occupying heap memory. 
+# TYPE go_gc_heap_objects_objects gauge +go_gc_heap_objects_objects 138954 +# HELP go_gc_heap_tiny_allocs_objects_total Count of small allocations that are packed together into blocks. These allocations are counted separately from other allocations because each individual allocation is not tracked by the runtime, only their block. Each block is already accounted for in allocs-by-size and frees-by-size. +# TYPE go_gc_heap_tiny_allocs_objects_total counter +go_gc_heap_tiny_allocs_objects_total 198929 +# HELP go_gc_limiter_last_enabled_gc_cycle GC cycle the last time the GC CPU limiter was enabled. This metric is useful for diagnosing the root cause of an out-of-memory error, because the limiter trades memory for CPU time when the GC's CPU time gets too high. This is most likely to occur with use of SetMemoryLimit. The first GC cycle is cycle 1, so a value of 0 indicates that it was never enabled. +# TYPE go_gc_limiter_last_enabled_gc_cycle gauge +go_gc_limiter_last_enabled_gc_cycle 0 +# HELP go_gc_pauses_seconds Distribution of individual GC-related stop-the-world pause latencies. Bucket counts increase monotonically. +# TYPE go_gc_pauses_seconds histogram +go_gc_pauses_seconds_bucket{le="6.399999999999999e-08"} 0 +go_gc_pauses_seconds_bucket{le="6.399999999999999e-07"} 0 +go_gc_pauses_seconds_bucket{le="7.167999999999999e-06"} 2 +go_gc_pauses_seconds_bucket{le="8.191999999999999e-05"} 31 +go_gc_pauses_seconds_bucket{le="0.0009175039999999999"} 58 +go_gc_pauses_seconds_bucket{le="0.010485759999999998"} 58 +go_gc_pauses_seconds_bucket{le="0.11744051199999998"} 58 +go_gc_pauses_seconds_bucket{le="+Inf"} 58 +go_gc_pauses_seconds_sum 0.0024209920000000003 +go_gc_pauses_seconds_count 58 +# HELP go_gc_scan_globals_bytes The total amount of global variable space that is scannable. +# TYPE go_gc_scan_globals_bytes gauge +go_gc_scan_globals_bytes 502376 +# HELP go_gc_scan_heap_bytes The total amount of heap space that is scannable. 
+# TYPE go_gc_scan_heap_bytes gauge +go_gc_scan_heap_bytes 1.7478304e+07 +# HELP go_gc_scan_stack_bytes The number of bytes of stack that were scanned last GC cycle. +# TYPE go_gc_scan_stack_bytes gauge +go_gc_scan_stack_bytes 1.455808e+06 +# HELP go_gc_scan_total_bytes The total amount space that is scannable. Sum of all metrics in /gc/scan. +# TYPE go_gc_scan_total_bytes gauge +go_gc_scan_total_bytes 1.9436488e+07 +# HELP go_gc_stack_starting_size_bytes The stack size of new goroutines. +# TYPE go_gc_stack_starting_size_bytes gauge +go_gc_stack_starting_size_bytes 4096 +# HELP go_godebug_non_default_behavior_execerrdot_events_total The number of non-default behaviors executed by the os/exec package due to a non-default GODEBUG=execerrdot=... setting. +# TYPE go_godebug_non_default_behavior_execerrdot_events_total counter +go_godebug_non_default_behavior_execerrdot_events_total 0 +# HELP go_godebug_non_default_behavior_gocachehash_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocachehash=... setting. +# TYPE go_godebug_non_default_behavior_gocachehash_events_total counter +go_godebug_non_default_behavior_gocachehash_events_total 0 +# HELP go_godebug_non_default_behavior_gocachetest_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocachetest=... setting. +# TYPE go_godebug_non_default_behavior_gocachetest_events_total counter +go_godebug_non_default_behavior_gocachetest_events_total 0 +# HELP go_godebug_non_default_behavior_gocacheverify_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocacheverify=... setting. 
+# TYPE go_godebug_non_default_behavior_gocacheverify_events_total counter +go_godebug_non_default_behavior_gocacheverify_events_total 0 +# HELP go_godebug_non_default_behavior_http2client_events_total The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2client=... setting. +# TYPE go_godebug_non_default_behavior_http2client_events_total counter +go_godebug_non_default_behavior_http2client_events_total 0 +# HELP go_godebug_non_default_behavior_http2server_events_total The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2server=... setting. +# TYPE go_godebug_non_default_behavior_http2server_events_total counter +go_godebug_non_default_behavior_http2server_events_total 0 +# HELP go_godebug_non_default_behavior_installgoroot_events_total The number of non-default behaviors executed by the go/build package due to a non-default GODEBUG=installgoroot=... setting. +# TYPE go_godebug_non_default_behavior_installgoroot_events_total counter +go_godebug_non_default_behavior_installgoroot_events_total 0 +# HELP go_godebug_non_default_behavior_jstmpllitinterp_events_total The number of non-default behaviors executed by the html/template package due to a non-default GODEBUG=jstmpllitinterp=... setting. +# TYPE go_godebug_non_default_behavior_jstmpllitinterp_events_total counter +go_godebug_non_default_behavior_jstmpllitinterp_events_total 0 +# HELP go_godebug_non_default_behavior_multipartmaxheaders_events_total The number of non-default behaviors executed by the mime/multipart package due to a non-default GODEBUG=multipartmaxheaders=... setting. 
+# TYPE go_godebug_non_default_behavior_multipartmaxheaders_events_total counter +go_godebug_non_default_behavior_multipartmaxheaders_events_total 0 +# HELP go_godebug_non_default_behavior_multipartmaxparts_events_total The number of non-default behaviors executed by the mime/multipart package due to a non-default GODEBUG=multipartmaxparts=... setting. +# TYPE go_godebug_non_default_behavior_multipartmaxparts_events_total counter +go_godebug_non_default_behavior_multipartmaxparts_events_total 0 +# HELP go_godebug_non_default_behavior_multipathtcp_events_total The number of non-default behaviors executed by the net package due to a non-default GODEBUG=multipathtcp=... setting. +# TYPE go_godebug_non_default_behavior_multipathtcp_events_total counter +go_godebug_non_default_behavior_multipathtcp_events_total 0 +# HELP go_godebug_non_default_behavior_panicnil_events_total The number of non-default behaviors executed by the runtime package due to a non-default GODEBUG=panicnil=... setting. +# TYPE go_godebug_non_default_behavior_panicnil_events_total counter +go_godebug_non_default_behavior_panicnil_events_total 0 +# HELP go_godebug_non_default_behavior_randautoseed_events_total The number of non-default behaviors executed by the math/rand package due to a non-default GODEBUG=randautoseed=... setting. +# TYPE go_godebug_non_default_behavior_randautoseed_events_total counter +go_godebug_non_default_behavior_randautoseed_events_total 0 +# HELP go_godebug_non_default_behavior_tarinsecurepath_events_total The number of non-default behaviors executed by the archive/tar package due to a non-default GODEBUG=tarinsecurepath=... setting. +# TYPE go_godebug_non_default_behavior_tarinsecurepath_events_total counter +go_godebug_non_default_behavior_tarinsecurepath_events_total 0 +# HELP go_godebug_non_default_behavior_tlsmaxrsasize_events_total The number of non-default behaviors executed by the crypto/tls package due to a non-default GODEBUG=tlsmaxrsasize=... setting. 
+# TYPE go_godebug_non_default_behavior_tlsmaxrsasize_events_total counter +go_godebug_non_default_behavior_tlsmaxrsasize_events_total 0 +# HELP go_godebug_non_default_behavior_x509sha1_events_total The number of non-default behaviors executed by the crypto/x509 package due to a non-default GODEBUG=x509sha1=... setting. +# TYPE go_godebug_non_default_behavior_x509sha1_events_total counter +go_godebug_non_default_behavior_x509sha1_events_total 0 +# HELP go_godebug_non_default_behavior_x509usefallbackroots_events_total The number of non-default behaviors executed by the crypto/x509 package due to a non-default GODEBUG=x509usefallbackroots=... setting. +# TYPE go_godebug_non_default_behavior_x509usefallbackroots_events_total counter +go_godebug_non_default_behavior_x509usefallbackroots_events_total 0 +# HELP go_godebug_non_default_behavior_zipinsecurepath_events_total The number of non-default behaviors executed by the archive/zip package due to a non-default GODEBUG=zipinsecurepath=... setting. +# TYPE go_godebug_non_default_behavior_zipinsecurepath_events_total counter +go_godebug_non_default_behavior_zipinsecurepath_events_total 0 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 1126 +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{version="go1.21.5"} 1 +# HELP go_memory_classes_heap_free_bytes Memory that is completely free and eligible to be returned to the underlying system, but has not been. This metric is the runtime's estimate of free address space that is backed by physical memory. +# TYPE go_memory_classes_heap_free_bytes gauge +go_memory_classes_heap_free_bytes 1.384448e+06 +# HELP go_memory_classes_heap_objects_bytes Memory occupied by live objects and dead objects that have not yet been marked free by the garbage collector. 
+# TYPE go_memory_classes_heap_objects_bytes gauge +go_memory_classes_heap_objects_bytes 2.157112e+07 +# HELP go_memory_classes_heap_released_bytes Memory that is completely free and has been returned to the underlying system. This metric is the runtime's estimate of free address space that is still mapped into the process, but is not backed by physical memory. +# TYPE go_memory_classes_heap_released_bytes gauge +go_memory_classes_heap_released_bytes 8.749056e+06 +# HELP go_memory_classes_heap_stacks_bytes Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use. Currently, this represents all stack memory for goroutines. It also includes all OS thread stacks in non-cgo programs. Note that stacks may be allocated differently in the future, and this may change. +# TYPE go_memory_classes_heap_stacks_bytes gauge +go_memory_classes_heap_stacks_bytes 6.946816e+06 +# HELP go_memory_classes_heap_unused_bytes Memory that is reserved for heap objects but is not currently used to hold heap objects. +# TYPE go_memory_classes_heap_unused_bytes gauge +go_memory_classes_heap_unused_bytes 7.485904e+06 +# HELP go_memory_classes_metadata_mcache_free_bytes Memory that is reserved for runtime mcache structures, but not in-use. +# TYPE go_memory_classes_metadata_mcache_free_bytes gauge +go_memory_classes_metadata_mcache_free_bytes 12000 +# HELP go_memory_classes_metadata_mcache_inuse_bytes Memory that is occupied by runtime mcache structures that are currently being used. +# TYPE go_memory_classes_metadata_mcache_inuse_bytes gauge +go_memory_classes_metadata_mcache_inuse_bytes 19200 +# HELP go_memory_classes_metadata_mspan_free_bytes Memory that is reserved for runtime mspan structures, but not in-use. 
+# TYPE go_memory_classes_metadata_mspan_free_bytes gauge +go_memory_classes_metadata_mspan_free_bytes 93408 +# HELP go_memory_classes_metadata_mspan_inuse_bytes Memory that is occupied by runtime mspan structures that are currently being used. +# TYPE go_memory_classes_metadata_mspan_inuse_bytes gauge +go_memory_classes_metadata_mspan_inuse_bytes 558432 +# HELP go_memory_classes_metadata_other_bytes Memory that is reserved for or used to hold runtime metadata. +# TYPE go_memory_classes_metadata_other_bytes gauge +go_memory_classes_metadata_other_bytes 4.99332e+06 +# HELP go_memory_classes_os_stacks_bytes Stack memory allocated by the underlying operating system. In non-cgo programs this metric is currently zero. This may change in the future.In cgo programs this metric includes OS thread stacks allocated directly from the OS. Currently, this only accounts for one stack in c-shared and c-archive build modes, and other sources of stacks from the OS are not measured. This too may change in the future. +# TYPE go_memory_classes_os_stacks_bytes gauge +go_memory_classes_os_stacks_bytes 0 +# HELP go_memory_classes_other_bytes Memory used by execution trace buffers, structures for debugging the runtime, finalizer and profiler specials, and more. +# TYPE go_memory_classes_other_bytes gauge +go_memory_classes_other_bytes 3.114019e+06 +# HELP go_memory_classes_profiling_buckets_bytes Memory that is used by the stack trace hash map used for profiling. +# TYPE go_memory_classes_profiling_buckets_bytes gauge +go_memory_classes_profiling_buckets_bytes 1.536925e+06 +# HELP go_memory_classes_total_bytes All memory mapped by the Go runtime into the current process as read-write. Note that this does not include memory mapped by code called via cgo or via the syscall package. Sum of all metrics in /memory/classes. +# TYPE go_memory_classes_total_bytes gauge +go_memory_classes_total_bytes 5.6464648e+07 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. 
+# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 2.157112e+07 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 1.95122176e+08 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.536925e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 1.814704e+06 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 4.99332e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 2.157112e+07 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 1.0133504e+07 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 2.9057024e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 138954 +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes 8.749056e+06 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 3.9190528e+07 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.7048969121171913e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. 
+# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 0 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 1.953658e+06 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 19200 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 31200 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 558432 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 651840 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 3.468156e+07 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 3.114019e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 6.946816e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 6.946816e+06 +# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 5.6464648e+07 +# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. 
+# TYPE go_sched_gomaxprocs_threads gauge +go_sched_gomaxprocs_threads 16 +# HELP go_sched_goroutines_goroutines Count of live goroutines. +# TYPE go_sched_goroutines_goroutines gauge +go_sched_goroutines_goroutines 1126 +# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. Bucket counts increase monotonically. +# TYPE go_sched_latencies_seconds histogram +go_sched_latencies_seconds_bucket{le="6.399999999999999e-08"} 4662 +go_sched_latencies_seconds_bucket{le="6.399999999999999e-07"} 5508 +go_sched_latencies_seconds_bucket{le="7.167999999999999e-06"} 11724 +go_sched_latencies_seconds_bucket{le="8.191999999999999e-05"} 51675 +go_sched_latencies_seconds_bucket{le="0.0009175039999999999"} 52881 +go_sched_latencies_seconds_bucket{le="0.010485759999999998"} 52896 +go_sched_latencies_seconds_bucket{le="0.11744051199999998"} 52896 +go_sched_latencies_seconds_bucket{le="+Inf"} 52896 +go_sched_latencies_seconds_sum 0.40295923199999995 +go_sched_latencies_seconds_count 52896 +# HELP go_sync_mutex_wait_total_seconds_total Approximate cumulative time goroutines have spent blocked on a sync.Mutex or sync.RWMutex. This metric is useful for identifying global changes in lock contention. Collect a mutex or block profile using the runtime/pprof package for more detailed contention data. +# TYPE go_sync_mutex_wait_total_seconds_total counter +go_sync_mutex_wait_total_seconds_total 0.076525904 +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +go_threads 18 +# HELP hidden_metrics_total [BETA] The count of hidden metrics. +# TYPE hidden_metrics_total counter +hidden_metrics_total 0 +# HELP kubernetes_build_info [ALPHA] A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running. 
+# TYPE kubernetes_build_info gauge +kubernetes_build_info{build_date="2023-12-14T19:18:17Z",compiler="gc",git_commit="3f7a50f38688eb332e2a1b013678c6435d539ae6",git_tree_state="clean",git_version="v1.29.0",go_version="go1.21.5",major="1",minor="29",platform="linux/amd64"} 1 +# HELP kubernetes_feature_enabled [BETA] This metric records the data about the stage and enablement of a k8s feature. +# TYPE kubernetes_feature_enabled gauge +kubernetes_feature_enabled{name="APIListChunking",stage=""} 1 +kubernetes_feature_enabled{name="APIPriorityAndFairness",stage=""} 1 +kubernetes_feature_enabled{name="APIResponseCompression",stage="BETA"} 1 +kubernetes_feature_enabled{name="APISelfSubjectReview",stage=""} 1 +kubernetes_feature_enabled{name="APIServerIdentity",stage="BETA"} 1 +kubernetes_feature_enabled{name="APIServerTracing",stage="BETA"} 1 +kubernetes_feature_enabled{name="AdmissionWebhookMatchConditions",stage="BETA"} 1 +kubernetes_feature_enabled{name="AggregatedDiscoveryEndpoint",stage="BETA"} 1 +kubernetes_feature_enabled{name="AllAlpha",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="AllBeta",stage="BETA"} 0 +kubernetes_feature_enabled{name="AllowServiceLBStatusOnNonLB",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="AnyVolumeDataSource",stage="BETA"} 1 +kubernetes_feature_enabled{name="AppArmor",stage="BETA"} 1 +kubernetes_feature_enabled{name="CPUManager",stage=""} 1 +kubernetes_feature_enabled{name="CPUManagerPolicyAlphaOptions",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CPUManagerPolicyBetaOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="CPUManagerPolicyOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="CRDValidationRatcheting",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CSIMigrationAzureFile",stage=""} 1 +kubernetes_feature_enabled{name="CSIMigrationPortworx",stage="BETA"} 0 +kubernetes_feature_enabled{name="CSIMigrationRBD",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="CSINodeExpandSecret",stage=""} 1 
+kubernetes_feature_enabled{name="CSIVolumeHealth",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CloudControllerManagerWebhook",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CloudDualStackNodeIPs",stage="BETA"} 1 +kubernetes_feature_enabled{name="ClusterTrustBundle",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ClusterTrustBundleProjection",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ComponentSLIs",stage="BETA"} 1 +kubernetes_feature_enabled{name="ConsistentHTTPGetHandlers",stage=""} 1 +kubernetes_feature_enabled{name="ConsistentListFromCache",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ContainerCheckpoint",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ContextualLogging",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CronJobsScheduledAnnotation",stage="BETA"} 1 +kubernetes_feature_enabled{name="CrossNamespaceVolumeDataSource",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CustomCPUCFSQuotaPeriod",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CustomResourceValidationExpressions",stage=""} 1 +kubernetes_feature_enabled{name="DefaultHostNetworkHostPortsInPodTemplates",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="DevicePluginCDIDevices",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableCloudProviders",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableKubeletCloudCredentialProviders",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableNodeKubeProxyVersion",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="DynamicResourceAllocation",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="EfficientWatchResumption",stage=""} 1 +kubernetes_feature_enabled{name="ElasticIndexedJob",stage="BETA"} 1 +kubernetes_feature_enabled{name="EventedPLEG",stage="BETA"} 0 +kubernetes_feature_enabled{name="ExecProbeTimeout",stage=""} 1 +kubernetes_feature_enabled{name="ExpandedDNSConfig",stage=""} 1 +kubernetes_feature_enabled{name="ExperimentalHostUserNamespaceDefaulting",stage="DEPRECATED"} 0 
+kubernetes_feature_enabled{name="GracefulNodeShutdown",stage="BETA"} 1 +kubernetes_feature_enabled{name="GracefulNodeShutdownBasedOnPodPriority",stage="BETA"} 1 +kubernetes_feature_enabled{name="HPAContainerMetrics",stage="BETA"} 1 +kubernetes_feature_enabled{name="HPAScaleToZero",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="HonorPVReclaimPolicy",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="IPTablesOwnershipCleanup",stage=""} 1 +kubernetes_feature_enabled{name="ImageMaximumGCAge",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InPlacePodVerticalScaling",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginAWSUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginAzureDiskUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginAzureFileUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginGCEUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginOpenStackUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginPortworxUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginRBDUnregister",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="InTreePluginvSphereUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="JobBackoffLimitPerIndex",stage="BETA"} 1 +kubernetes_feature_enabled{name="JobPodFailurePolicy",stage="BETA"} 1 +kubernetes_feature_enabled{name="JobPodReplacementPolicy",stage="BETA"} 1 +kubernetes_feature_enabled{name="JobReadyPods",stage=""} 1 +kubernetes_feature_enabled{name="KMSv1",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="KMSv2",stage=""} 1 +kubernetes_feature_enabled{name="KMSv2KDF",stage=""} 1 +kubernetes_feature_enabled{name="KubeProxyDrainingTerminatingNodes",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="KubeletCgroupDriverFromCRI",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="KubeletInUserNamespace",stage="ALPHA"} 0 
+kubernetes_feature_enabled{name="KubeletPodResources",stage=""} 1 +kubernetes_feature_enabled{name="KubeletPodResourcesDynamicResources",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="KubeletPodResourcesGet",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="KubeletPodResourcesGetAllocatable",stage=""} 1 +kubernetes_feature_enabled{name="KubeletSeparateDiskGC",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="KubeletTracing",stage="BETA"} 1 +kubernetes_feature_enabled{name="LegacyServiceAccountTokenCleanUp",stage="BETA"} 1 +kubernetes_feature_enabled{name="LegacyServiceAccountTokenTracking",stage=""} 1 +kubernetes_feature_enabled{name="LoadBalancerIPMode",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="LocalStorageCapacityIsolationFSQuotaMonitoring",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="LogarithmicScaleDown",stage="BETA"} 1 +kubernetes_feature_enabled{name="LoggingAlphaOptions",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="LoggingBetaOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="MatchLabelKeysInPodAffinity",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="MatchLabelKeysInPodTopologySpread",stage="BETA"} 1 +kubernetes_feature_enabled{name="MaxUnavailableStatefulSet",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="MemoryManager",stage="BETA"} 1 +kubernetes_feature_enabled{name="MemoryQoS",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="MinDomainsInPodTopologySpread",stage="BETA"} 1 +kubernetes_feature_enabled{name="MinimizeIPTablesRestore",stage=""} 1 +kubernetes_feature_enabled{name="MultiCIDRServiceAllocator",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="NFTablesProxyMode",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="NewVolumeManagerReconstruction",stage="BETA"} 1 +kubernetes_feature_enabled{name="NodeInclusionPolicyInPodTopologySpread",stage="BETA"} 1 +kubernetes_feature_enabled{name="NodeLogQuery",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="NodeOutOfServiceVolumeDetach",stage=""} 1 
+kubernetes_feature_enabled{name="NodeSwap",stage="BETA"} 0 +kubernetes_feature_enabled{name="OpenAPIEnums",stage="BETA"} 1 +kubernetes_feature_enabled{name="PDBUnhealthyPodEvictionPolicy",stage="BETA"} 1 +kubernetes_feature_enabled{name="PersistentVolumeLastPhaseTransitionTime",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodAndContainerStatsFromCRI",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PodDeletionCost",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodDisruptionConditions",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodHostIPs",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodIndexLabel",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodLifecycleSleepAction",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PodReadyToStartContainersCondition",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodSchedulingReadiness",stage="BETA"} 1 +kubernetes_feature_enabled{name="ProcMountType",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ProxyTerminatingEndpoints",stage=""} 1 +kubernetes_feature_enabled{name="QOSReserved",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ReadWriteOncePod",stage=""} 1 +kubernetes_feature_enabled{name="RecoverVolumeExpansionFailure",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="RemainingItemCount",stage=""} 1 +kubernetes_feature_enabled{name="RemoveSelfLink",stage=""} 1 +kubernetes_feature_enabled{name="RotateKubeletServerCertificate",stage="BETA"} 1 +kubernetes_feature_enabled{name="RuntimeClassInImageCriApi",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="SELinuxMountReadWriteOncePod",stage="BETA"} 1 +kubernetes_feature_enabled{name="SchedulerQueueingHints",stage="BETA"} 0 +kubernetes_feature_enabled{name="SecurityContextDeny",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="SeparateTaintEvictionController",stage="BETA"} 1 +kubernetes_feature_enabled{name="ServerSideApply",stage=""} 1 +kubernetes_feature_enabled{name="ServerSideFieldValidation",stage=""} 1 
+kubernetes_feature_enabled{name="ServiceAccountTokenJTI",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenNodeBinding",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenNodeBindingValidation",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenPodNodeInfo",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceNodePortStaticSubrange",stage=""} 1 +kubernetes_feature_enabled{name="SidecarContainers",stage="BETA"} 1 +kubernetes_feature_enabled{name="SizeMemoryBackedVolumes",stage="BETA"} 1 +kubernetes_feature_enabled{name="SkipReadOnlyValidationGCE",stage="DEPRECATED"} 1 +kubernetes_feature_enabled{name="StableLoadBalancerNodeSet",stage="BETA"} 1 +kubernetes_feature_enabled{name="StatefulSetAutoDeletePVC",stage="BETA"} 1 +kubernetes_feature_enabled{name="StatefulSetStartOrdinal",stage="BETA"} 1 +kubernetes_feature_enabled{name="StorageVersionAPI",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="StorageVersionHash",stage="BETA"} 1 +kubernetes_feature_enabled{name="StructuredAuthenticationConfiguration",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="StructuredAuthorizationConfiguration",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="TopologyAwareHints",stage="BETA"} 1 +kubernetes_feature_enabled{name="TopologyManagerPolicyAlphaOptions",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="TopologyManagerPolicyBetaOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="TopologyManagerPolicyOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="TranslateStreamCloseWebsocketRequests",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="UnauthenticatedHTTP2DOSMitigation",stage="BETA"} 1 +kubernetes_feature_enabled{name="UnknownVersionInteroperabilityProxy",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="UserNamespacesPodSecurityStandards",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="UserNamespacesSupport",stage="ALPHA"} 0 
+kubernetes_feature_enabled{name="ValidatingAdmissionPolicy",stage="BETA"} 0 +kubernetes_feature_enabled{name="VolumeAttributesClass",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="VolumeCapacityPriority",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="WatchBookmark",stage=""} 1 +kubernetes_feature_enabled{name="WatchList",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="WinDSR",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="WinOverlay",stage="BETA"} 1 +kubernetes_feature_enabled{name="WindowsHostNetwork",stage="ALPHA"} 1 +kubernetes_feature_enabled{name="ZeroLimitedNominalConcurrencyShares",stage="BETA"} 0 +# HELP leader_election_master_status [ALPHA] Gauge of if the reporting system is master of the relevant lease, 0 indicates backup, 1 indicates master. 'name' is the string used to identify the lease. Please make sure to group by name. +# TYPE leader_election_master_status gauge +leader_election_master_status{name="kube-controller-manager"} 1 +# HELP node_collector_evictions_total [STABLE] Number of Node evictions that happened since current instance of NodeController started. +# TYPE node_collector_evictions_total counter +node_collector_evictions_total{zone=""} 0 +# HELP node_collector_unhealthy_nodes_in_zone [ALPHA] Gauge measuring number of not Ready Nodes per zones. +# TYPE node_collector_unhealthy_nodes_in_zone gauge +node_collector_unhealthy_nodes_in_zone{zone=""} 0 +# HELP node_collector_update_all_nodes_health_duration_seconds [ALPHA] Duration in seconds for NodeController to update the health of all nodes. 
+# TYPE node_collector_update_all_nodes_health_duration_seconds histogram +node_collector_update_all_nodes_health_duration_seconds_bucket{le="0.01"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="0.04"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="0.16"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="0.64"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="2.56"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="10.24"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="40.96"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="163.84"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="+Inf"} 441 +node_collector_update_all_nodes_health_duration_seconds_sum 0.08619570099999996 +node_collector_update_all_nodes_health_duration_seconds_count 441 +# HELP node_collector_update_node_health_duration_seconds [ALPHA] Duration in seconds for NodeController to update the health of a single node. 
+# TYPE node_collector_update_node_health_duration_seconds histogram +node_collector_update_node_health_duration_seconds_bucket{le="0.001"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="0.004"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="0.016"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="0.064"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="0.256"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="1.024"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="4.096"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="16.384"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="+Inf"} 441 +node_collector_update_node_health_duration_seconds_sum 0.05145579200000004 +node_collector_update_node_health_duration_seconds_count 441 +# HELP node_collector_zone_health [ALPHA] Gauge measuring percentage of healthy nodes per zone. +# TYPE node_collector_zone_health gauge +node_collector_zone_health{zone=""} 100 +# HELP node_collector_zone_size [ALPHA] Gauge measuring number of registered Nodes per zones. 
+# TYPE node_collector_zone_size gauge +node_collector_zone_size{zone=""} 1 +# HELP node_ipam_controller_cidrset_allocation_tries_per_request [ALPHA] Number of endpoints added on each Service sync +# TYPE node_ipam_controller_cidrset_allocation_tries_per_request histogram +node_ipam_controller_cidrset_allocation_tries_per_request_bucket{clusterCIDR="10.244.0.0/16",le="1"} 1 +node_ipam_controller_cidrset_allocation_tries_per_request_bucket{clusterCIDR="10.244.0.0/16",le="5"} 1 +node_ipam_controller_cidrset_allocation_tries_per_request_bucket{clusterCIDR="10.244.0.0/16",le="25"} 1 +node_ipam_controller_cidrset_allocation_tries_per_request_bucket{clusterCIDR="10.244.0.0/16",le="125"} 1 +node_ipam_controller_cidrset_allocation_tries_per_request_bucket{clusterCIDR="10.244.0.0/16",le="625"} 1 +node_ipam_controller_cidrset_allocation_tries_per_request_bucket{clusterCIDR="10.244.0.0/16",le="+Inf"} 1 +node_ipam_controller_cidrset_allocation_tries_per_request_sum{clusterCIDR="10.244.0.0/16"} 0 +node_ipam_controller_cidrset_allocation_tries_per_request_count{clusterCIDR="10.244.0.0/16"} 1 +# HELP node_ipam_controller_cidrset_cidrs_allocations_total [ALPHA] Counter measuring total number of CIDR allocations. +# TYPE node_ipam_controller_cidrset_cidrs_allocations_total counter +node_ipam_controller_cidrset_cidrs_allocations_total{clusterCIDR="10.244.0.0/16"} 1 +# HELP node_ipam_controller_cidrset_usage_cidrs [ALPHA] Gauge measuring percentage of allocated CIDRs. +# TYPE node_ipam_controller_cidrset_usage_cidrs gauge +node_ipam_controller_cidrset_usage_cidrs{clusterCIDR="10.244.0.0/16"} 0.00390625 +# HELP node_ipam_controller_cirdset_max_cidrs [ALPHA] Maximum number of CIDRs that can be allocated. +# TYPE node_ipam_controller_cirdset_max_cidrs gauge +node_ipam_controller_cirdset_max_cidrs{clusterCIDR="10.244.0.0/16"} 256 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. 
+# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 58.19 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 1.048576e+06 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 18 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 1.092608e+08 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.70489476711e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 1.337397248e+09 +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. +# TYPE process_virtual_memory_max_bytes gauge +process_virtual_memory_max_bytes 1.8446744073709552e+19 +# HELP registered_metrics_total [BETA] The count of registered metrics broken by stability level and deprecation version. +# TYPE registered_metrics_total counter +registered_metrics_total{deprecated_version="",stability_level="ALPHA"} 138 +registered_metrics_total{deprecated_version="",stability_level="BETA"} 4 +registered_metrics_total{deprecated_version="",stability_level="STABLE"} 11 +# HELP replicaset_controller_sorting_deletion_age_ratio [ALPHA] The ratio of chosen deleted pod's ages to the current youngest pod's age (at the time). Should be <2. The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate's effect on the sorting (and deletion) of pods when a replicaset scales down. This only considers Ready pods when calculating and reporting. 
+# TYPE replicaset_controller_sorting_deletion_age_ratio histogram +replicaset_controller_sorting_deletion_age_ratio_bucket{le="0.25"} 0 +replicaset_controller_sorting_deletion_age_ratio_bucket{le="0.5"} 0 +replicaset_controller_sorting_deletion_age_ratio_bucket{le="1"} 0 +replicaset_controller_sorting_deletion_age_ratio_bucket{le="2"} 0 +replicaset_controller_sorting_deletion_age_ratio_bucket{le="4"} 0 +replicaset_controller_sorting_deletion_age_ratio_bucket{le="8"} 0 +replicaset_controller_sorting_deletion_age_ratio_bucket{le="+Inf"} 0 +replicaset_controller_sorting_deletion_age_ratio_sum 0 +replicaset_controller_sorting_deletion_age_ratio_count 0 +# HELP rest_client_exec_plugin_certificate_rotation_age [ALPHA] Histogram of the number of seconds the last auth exec plugin client certificate lived before being rotated. If auth exec plugin client certificates are unused, histogram will contain no data. +# TYPE rest_client_exec_plugin_certificate_rotation_age histogram +rest_client_exec_plugin_certificate_rotation_age_bucket{le="600"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="1800"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="3600"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="14400"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="86400"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="604800"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="2.592e+06"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="7.776e+06"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="1.5552e+07"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="3.1104e+07"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="1.24416e+08"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="+Inf"} 0 +rest_client_exec_plugin_certificate_rotation_age_sum 0 +rest_client_exec_plugin_certificate_rotation_age_count 0 +# HELP 
rest_client_exec_plugin_ttl_seconds [ALPHA] Gauge of the shortest TTL (time-to-live) of the client certificate(s) managed by the auth exec plugin. The value is in seconds until certificate expiry (negative if already expired). If auth exec plugins are unused or manage no TLS certificates, the value will be +INF. +# TYPE rest_client_exec_plugin_ttl_seconds gauge +rest_client_exec_plugin_ttl_seconds +Inf +# HELP rest_client_rate_limiter_duration_seconds [ALPHA] Client side rate limiter latency in seconds. Broken down by verb, and host. +# TYPE rest_client_rate_limiter_duration_seconds histogram +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.005"} 1487 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.025"} 1489 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.1"} 1528 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.25"} 1533 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.5"} 1544 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="1"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="2"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="4"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="8"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="15"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="30"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="60"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1546 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="GET"} 7.7935272619999925 
+rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="GET"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.005"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.025"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.1"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.25"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.5"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="1"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="2"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="4"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="8"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="15"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="30"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="60"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 10 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="PATCH"} 5.7598e-05 +rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="PATCH"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.005"} 57 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.025"} 57 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.1"} 74 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.25"} 75 
+rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.5"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="1"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="2"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="4"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="8"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="15"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="30"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="60"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 84 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="POST"} 4.6114257960000025 +rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="POST"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.005"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.025"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.1"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.25"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.5"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="1"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="2"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="4"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="8"} 1128 
+rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="15"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="30"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="60"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1128 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="PUT"} 0.0029605479999999973 +rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="PUT"} 1128 +# HELP rest_client_request_duration_seconds [ALPHA] Request latency in seconds. Broken down by verb, and host. +# TYPE rest_client_request_duration_seconds histogram +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.005"} 1414 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.025"} 1482 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.1"} 1523 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.25"} 1531 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.5"} 1543 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="1"} 1545 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="2"} 1545 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="4"} 1546 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="8"} 1546 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="15"} 1546 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="30"} 1546 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="60"} 1546 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1546 
+rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="GET"} 16.967002752999996 +rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="GET"} 1546 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.005"} 0 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.025"} 4 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.1"} 6 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.25"} 6 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.5"} 6 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="1"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="2"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="4"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="8"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="15"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="30"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="60"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 10 +rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="PATCH"} 3.209190288 +rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="PATCH"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.005"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.025"} 46 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.1"} 64 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.25"} 66 
+rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.5"} 77 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="1"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="2"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="4"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="8"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="15"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="30"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="60"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 84 +rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="POST"} 11.851414454 +rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="POST"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.005"} 1 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.025"} 1114 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.1"} 1125 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.25"} 1127 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.5"} 1127 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="1"} 1128 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="2"} 1128 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="4"} 1128 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="8"} 1128 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="15"} 1128 
+rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="30"} 1128 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="60"} 1128 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1128 +rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="PUT"} 15.549394524000004 +rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="PUT"} 1128 +# HELP rest_client_request_size_bytes [ALPHA] Request size in bytes. Broken down by verb and host. +# TYPE rest_client_request_size_bytes histogram +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="64"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="256"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="512"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1024"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4096"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="16384"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="65536"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="262144"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.048576e+06"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4.194304e+06"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.6777216e+07"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1546 +rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="GET"} 0 +rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="GET"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="64"} 1 
+rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="256"} 3 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="512"} 4 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1024"} 4 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4096"} 10 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="16384"} 10 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="65536"} 10 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="262144"} 10 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.048576e+06"} 10 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4.194304e+06"} 10 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.6777216e+07"} 10 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 10 +rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="PATCH"} 18531 +rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="PATCH"} 10 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="64"} 1 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="256"} 55 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="512"} 66 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1024"} 70 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4096"} 84 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="16384"} 84 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="65536"} 84 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="262144"} 84 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.048576e+06"} 84 
+rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4.194304e+06"} 84 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.6777216e+07"} 84 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 84 +rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="POST"} 34927 +rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="POST"} 84 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="64"} 0 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="256"} 0 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="512"} 1099 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1024"} 1104 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4096"} 1128 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="16384"} 1128 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="65536"} 1128 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="262144"} 1128 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.048576e+06"} 1128 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4.194304e+06"} 1128 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.6777216e+07"} 1128 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1128 +rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="PUT"} 534678 +rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="PUT"} 1128 +# HELP rest_client_requests_total [ALPHA] Number of HTTP requests, partitioned by status code, method, and host. 
+# TYPE rest_client_requests_total counter +rest_client_requests_total{code="200",host="172.18.0.2:6443",method="GET"} 1756 +rest_client_requests_total{code="200",host="172.18.0.2:6443",method="PATCH"} 10 +rest_client_requests_total{code="200",host="172.18.0.2:6443",method="PUT"} 1125 +rest_client_requests_total{code="201",host="172.18.0.2:6443",method="POST"} 84 +rest_client_requests_total{code="403",host="172.18.0.2:6443",method="GET"} 1 +rest_client_requests_total{code="404",host="172.18.0.2:6443",method="GET"} 34 +rest_client_requests_total{code="409",host="172.18.0.2:6443",method="PUT"} 3 +# HELP rest_client_response_size_bytes [ALPHA] Response size in bytes. Broken down by verb and host. +# TYPE rest_client_response_size_bytes histogram +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="64"} 21 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="256"} 88 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="512"} 1223 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1024"} 1226 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4096"} 1233 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="16384"} 1543 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="65536"} 1546 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="262144"} 1546 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.048576e+06"} 1546 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4.194304e+06"} 1546 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.6777216e+07"} 1546 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1546 +rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="GET"} 4.16515e+06 
+rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="GET"} 1546 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="64"} 0 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="256"} 0 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="512"} 0 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1024"} 1 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4096"} 10 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="16384"} 10 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="65536"} 10 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="262144"} 10 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.048576e+06"} 10 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4.194304e+06"} 10 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.6777216e+07"} 10 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 10 +rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="PATCH"} 28839 +rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="PATCH"} 10 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="64"} 0 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="256"} 38 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="512"} 39 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1024"} 51 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4096"} 82 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="16384"} 84 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="65536"} 84 
+rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="262144"} 84 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.048576e+06"} 84 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4.194304e+06"} 84 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.6777216e+07"} 84 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 84 +rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="POST"} 78889 +rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="POST"} 84 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="64"} 0 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="256"} 1 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="512"} 1101 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1024"} 1103 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4096"} 1123 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="16384"} 1128 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="65536"} 1128 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="262144"} 1128 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.048576e+06"} 1128 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4.194304e+06"} 1128 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.6777216e+07"} 1128 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1128 +rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="PUT"} 578751 +rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="PUT"} 1128 +# HELP rest_client_transport_cache_entries [ALPHA] Number of transport entries in the internal cache. 
+# TYPE rest_client_transport_cache_entries gauge +rest_client_transport_cache_entries 3 +# HELP rest_client_transport_create_calls_total [ALPHA] Number of calls to get a new transport, partitioned by the result of the operation hit: obtained from the cache, miss: created and added to the cache, uncacheable: created and not cached +# TYPE rest_client_transport_create_calls_total counter +rest_client_transport_create_calls_total{result="hit"} 52 +rest_client_transport_create_calls_total{result="miss"} 3 +# HELP retroactive_storageclass_errors_total [ALPHA] Total number of failed retroactive StorageClass assignments to persistent volume claim +# TYPE retroactive_storageclass_errors_total counter +retroactive_storageclass_errors_total 0 +# HELP retroactive_storageclass_total [ALPHA] Total number of retroactive StorageClass assignments to persistent volume claim +# TYPE retroactive_storageclass_total counter +retroactive_storageclass_total 0 +# HELP root_ca_cert_publisher_sync_duration_seconds [ALPHA] Number of namespace syncs happened in root ca cert publisher. 
+# TYPE root_ca_cert_publisher_sync_duration_seconds histogram +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.001"} 0 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.002"} 0 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.004"} 0 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.008"} 0 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.016"} 4 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.032"} 4 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.064"} 4 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.128"} 4 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.256"} 4 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.512"} 4 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="1.024"} 5 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="2.048"} 5 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="4.096"} 5 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="8.192"} 5 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="16.384"} 5 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="+Inf"} 5 +root_ca_cert_publisher_sync_duration_seconds_sum{code="200"} 0.7155615430000001 +root_ca_cert_publisher_sync_duration_seconds_count{code="200"} 5 +# HELP root_ca_cert_publisher_sync_total [ALPHA] Number of namespace syncs happened in root ca cert publisher. 
+# TYPE root_ca_cert_publisher_sync_total counter +root_ca_cert_publisher_sync_total{code="200"} 5 +# HELP running_managed_controllers [ALPHA] Indicates where instances of a controller are currently running +# TYPE running_managed_controllers gauge +running_managed_controllers{manager="kube-controller-manager",name="nodeipam"} 1 +# HELP service_controller_loadbalancer_sync_total [ALPHA] A metric counting the amount of times any load balancer has been configured, as an effect of service/node changes on the cluster +# TYPE service_controller_loadbalancer_sync_total counter +service_controller_loadbalancer_sync_total 0 +# HELP service_controller_nodesync_error_total [ALPHA] A metric counting the amount of times any load balancer has been configured and errored, as an effect of node changes on the cluster +# TYPE service_controller_nodesync_error_total counter +service_controller_nodesync_error_total 0 +# HELP service_controller_nodesync_latency_seconds [ALPHA] A metric measuring the latency for nodesync which updates loadbalancer hosts on cluster node updates. 
+# TYPE service_controller_nodesync_latency_seconds histogram +service_controller_nodesync_latency_seconds_bucket{le="1"} 0 +service_controller_nodesync_latency_seconds_bucket{le="2"} 0 +service_controller_nodesync_latency_seconds_bucket{le="4"} 0 +service_controller_nodesync_latency_seconds_bucket{le="8"} 0 +service_controller_nodesync_latency_seconds_bucket{le="16"} 0 +service_controller_nodesync_latency_seconds_bucket{le="32"} 0 +service_controller_nodesync_latency_seconds_bucket{le="64"} 0 +service_controller_nodesync_latency_seconds_bucket{le="128"} 0 +service_controller_nodesync_latency_seconds_bucket{le="256"} 0 +service_controller_nodesync_latency_seconds_bucket{le="512"} 0 +service_controller_nodesync_latency_seconds_bucket{le="1024"} 0 +service_controller_nodesync_latency_seconds_bucket{le="2048"} 0 +service_controller_nodesync_latency_seconds_bucket{le="4096"} 0 +service_controller_nodesync_latency_seconds_bucket{le="8192"} 0 +service_controller_nodesync_latency_seconds_bucket{le="16384"} 0 +service_controller_nodesync_latency_seconds_bucket{le="+Inf"} 0 +service_controller_nodesync_latency_seconds_sum 0 +service_controller_nodesync_latency_seconds_count 0 +# HELP service_controller_update_loadbalancer_host_latency_seconds [ALPHA] A metric measuring the latency for updating each load balancer hosts. 
+# TYPE service_controller_update_loadbalancer_host_latency_seconds histogram +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="1"} 0 +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="2"} 0 +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="4"} 0 +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="8"} 0 +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="16"} 0 +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="32"} 0 +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="64"} 0 +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="128"} 0 +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="256"} 0 +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="512"} 0 +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="1024"} 0 +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="2048"} 0 +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="4096"} 0 +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="8192"} 0 +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="16384"} 0 +service_controller_update_loadbalancer_host_latency_seconds_bucket{le="+Inf"} 0 +service_controller_update_loadbalancer_host_latency_seconds_sum 0 +service_controller_update_loadbalancer_host_latency_seconds_count 0 +# HELP taint_eviction_controller_pod_deletion_duration_seconds [ALPHA] Latency, in seconds, between the time when a taint effect has been activated for the Pod and its deletion via TaintEvictionController. 
+# TYPE taint_eviction_controller_pod_deletion_duration_seconds histogram +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="0.005"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="0.025"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="0.1"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="0.5"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="1"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="2.5"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="10"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="30"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="60"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="120"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="180"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="240"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="+Inf"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_sum 0 +taint_eviction_controller_pod_deletion_duration_seconds_count 0 +# HELP taint_eviction_controller_pod_deletions_total [ALPHA] Total number of Pods deleted by TaintEvictionController since its start. 
+# TYPE taint_eviction_controller_pod_deletions_total counter +taint_eviction_controller_pod_deletions_total 0 +# HELP ttl_after_finished_controller_job_deletion_duration_seconds [ALPHA] The time it took to delete the job since it became eligible for deletion +# TYPE ttl_after_finished_controller_job_deletion_duration_seconds histogram +ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="0.1"} 0 +ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="0.2"} 0 +ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="0.4"} 0 +ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="0.8"} 0 +ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="1.6"} 0 +ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="3.2"} 0 +ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="6.4"} 0 +ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="12.8"} 0 +ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="25.6"} 0 +ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="51.2"} 0 +ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="102.4"} 0 +ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="204.8"} 0 +ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="409.6"} 0 +ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="819.2"} 0 +ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="+Inf"} 0 +ttl_after_finished_controller_job_deletion_duration_seconds_sum 0 +ttl_after_finished_controller_job_deletion_duration_seconds_count 0 +# HELP workqueue_adds_total [ALPHA] Total number of adds handled by workqueue +# TYPE workqueue_adds_total counter +workqueue_adds_total{name="ClusterRoleAggregator"} 19 +workqueue_adds_total{name="DynamicCABundle-client-ca-bundle"} 2 +workqueue_adds_total{name="DynamicCABundle-csr-controller"} 8 
+workqueue_adds_total{name="DynamicCABundle-request-header"} 2 +workqueue_adds_total{name="DynamicServingCertificateController"} 38 +workqueue_adds_total{name="bootstrap_signer_queue"} 2 +workqueue_adds_total{name="certificate"} 0 +workqueue_adds_total{name="claims"} 0 +workqueue_adds_total{name="cronjob"} 0 +workqueue_adds_total{name="daemonset"} 17 +workqueue_adds_total{name="deployment"} 20 +workqueue_adds_total{name="disruption"} 0 +workqueue_adds_total{name="disruption_recheck"} 0 +workqueue_adds_total{name="endpoint"} 6 +workqueue_adds_total{name="endpoint_slice"} 7 +workqueue_adds_total{name="endpoint_slice_mirroring"} 4 +workqueue_adds_total{name="ephemeral_volume"} 0 +workqueue_adds_total{name="garbage_collector_attempt_to_delete"} 1 +workqueue_adds_total{name="garbage_collector_attempt_to_orphan"} 0 +workqueue_adds_total{name="garbage_collector_graph_changes"} 2979 +workqueue_adds_total{name="horizontalpodautoscaler"} 0 +workqueue_adds_total{name="job"} 0 +workqueue_adds_total{name="job_orphan_pod"} 0 +workqueue_adds_total{name="namespace"} 0 +workqueue_adds_total{name="node"} 1 +workqueue_adds_total{name="node_lifecycle_controller"} 12 +workqueue_adds_total{name="node_lifecycle_controller_pods"} 10 +workqueue_adds_total{name="noexec_taint_node"} 1 +workqueue_adds_total{name="noexec_taint_pod"} 16 +workqueue_adds_total{name="orphaned_pods_nodes"} 0 +workqueue_adds_total{name="pvcprotection"} 0 +workqueue_adds_total{name="pvcs"} 0 +workqueue_adds_total{name="pvprotection"} 0 +workqueue_adds_total{name="replicaset"} 18 +workqueue_adds_total{name="replicationmanager"} 0 +workqueue_adds_total{name="resource_quota_controller_resource_changes"} 0 +workqueue_adds_total{name="resourcequota_primary"} 0 +workqueue_adds_total{name="resourcequota_priority"} 0 +workqueue_adds_total{name="root_ca_cert_publisher"} 5 +workqueue_adds_total{name="service"} 0 +workqueue_adds_total{name="serviceaccount"} 5 +workqueue_adds_total{name="serviceaccount_tokens_secret"} 0 
+workqueue_adds_total{name="serviceaccount_tokens_service"} 43 +workqueue_adds_total{name="stale_pod_disruption"} 0 +workqueue_adds_total{name="statefulset"} 0 +workqueue_adds_total{name="token_cleaner"} 1 +workqueue_adds_total{name="ttl_jobs_to_delete"} 0 +workqueue_adds_total{name="ttlcontroller"} 12 +workqueue_adds_total{name="volume_expand"} 0 +workqueue_adds_total{name="volumes"} 0 +# HELP workqueue_depth [ALPHA] Current depth of workqueue +# TYPE workqueue_depth gauge +workqueue_depth{name="ClusterRoleAggregator"} 0 +workqueue_depth{name="DynamicCABundle-client-ca-bundle"} 0 +workqueue_depth{name="DynamicCABundle-csr-controller"} 0 +workqueue_depth{name="DynamicCABundle-request-header"} 0 +workqueue_depth{name="DynamicServingCertificateController"} 0 +workqueue_depth{name="bootstrap_signer_queue"} 0 +workqueue_depth{name="certificate"} 0 +workqueue_depth{name="claims"} 0 +workqueue_depth{name="cronjob"} 0 +workqueue_depth{name="daemonset"} 0 +workqueue_depth{name="deployment"} 0 +workqueue_depth{name="disruption"} 0 +workqueue_depth{name="disruption_recheck"} 0 +workqueue_depth{name="endpoint"} 0 +workqueue_depth{name="endpoint_slice"} 0 +workqueue_depth{name="endpoint_slice_mirroring"} 0 +workqueue_depth{name="ephemeral_volume"} 0 +workqueue_depth{name="garbage_collector_attempt_to_delete"} 0 +workqueue_depth{name="garbage_collector_attempt_to_orphan"} 0 +workqueue_depth{name="garbage_collector_graph_changes"} 0 +workqueue_depth{name="horizontalpodautoscaler"} 0 +workqueue_depth{name="job"} 0 +workqueue_depth{name="job_orphan_pod"} 0 +workqueue_depth{name="namespace"} 0 +workqueue_depth{name="node"} 1 +workqueue_depth{name="node_lifecycle_controller"} 0 +workqueue_depth{name="node_lifecycle_controller_pods"} 0 +workqueue_depth{name="noexec_taint_node"} 0 +workqueue_depth{name="noexec_taint_pod"} 0 +workqueue_depth{name="orphaned_pods_nodes"} 0 +workqueue_depth{name="pvcprotection"} 0 +workqueue_depth{name="pvcs"} 0 +workqueue_depth{name="pvprotection"} 0 
+workqueue_depth{name="replicaset"} 0 +workqueue_depth{name="replicationmanager"} 0 +workqueue_depth{name="resource_quota_controller_resource_changes"} 0 +workqueue_depth{name="resourcequota_primary"} 0 +workqueue_depth{name="resourcequota_priority"} 0 +workqueue_depth{name="root_ca_cert_publisher"} 0 +workqueue_depth{name="service"} 0 +workqueue_depth{name="serviceaccount"} 0 +workqueue_depth{name="serviceaccount_tokens_secret"} 0 +workqueue_depth{name="serviceaccount_tokens_service"} 0 +workqueue_depth{name="stale_pod_disruption"} 0 +workqueue_depth{name="statefulset"} 0 +workqueue_depth{name="token_cleaner"} 0 +workqueue_depth{name="ttl_jobs_to_delete"} 0 +workqueue_depth{name="ttlcontroller"} 0 +workqueue_depth{name="volume_expand"} 0 +workqueue_depth{name="volumes"} 0 +# HELP workqueue_longest_running_processor_seconds [ALPHA] How many seconds has the longest running processor for workqueue been running. +# TYPE workqueue_longest_running_processor_seconds gauge +workqueue_longest_running_processor_seconds{name="ClusterRoleAggregator"} 0 +workqueue_longest_running_processor_seconds{name="DynamicCABundle-client-ca-bundle"} 0 +workqueue_longest_running_processor_seconds{name="DynamicCABundle-csr-controller"} 0 +workqueue_longest_running_processor_seconds{name="DynamicCABundle-request-header"} 0 +workqueue_longest_running_processor_seconds{name="DynamicServingCertificateController"} 0 +workqueue_longest_running_processor_seconds{name="bootstrap_signer_queue"} 0 +workqueue_longest_running_processor_seconds{name="certificate"} 0 +workqueue_longest_running_processor_seconds{name="claims"} 0 +workqueue_longest_running_processor_seconds{name="cronjob"} 0 +workqueue_longest_running_processor_seconds{name="daemonset"} 0 +workqueue_longest_running_processor_seconds{name="deployment"} 0 +workqueue_longest_running_processor_seconds{name="disruption"} 0 +workqueue_longest_running_processor_seconds{name="disruption_recheck"} 0 
+workqueue_longest_running_processor_seconds{name="endpoint"} 0 +workqueue_longest_running_processor_seconds{name="endpoint_slice"} 0 +workqueue_longest_running_processor_seconds{name="endpoint_slice_mirroring"} 0 +workqueue_longest_running_processor_seconds{name="ephemeral_volume"} 0 +workqueue_longest_running_processor_seconds{name="garbage_collector_attempt_to_delete"} 0 +workqueue_longest_running_processor_seconds{name="garbage_collector_attempt_to_orphan"} 0 +workqueue_longest_running_processor_seconds{name="garbage_collector_graph_changes"} 0 +workqueue_longest_running_processor_seconds{name="horizontalpodautoscaler"} 0 +workqueue_longest_running_processor_seconds{name="job"} 0 +workqueue_longest_running_processor_seconds{name="job_orphan_pod"} 0 +workqueue_longest_running_processor_seconds{name="namespace"} 0 +workqueue_longest_running_processor_seconds{name="node"} 0 +workqueue_longest_running_processor_seconds{name="node_lifecycle_controller"} 0 +workqueue_longest_running_processor_seconds{name="node_lifecycle_controller_pods"} 0 +workqueue_longest_running_processor_seconds{name="noexec_taint_node"} 0 +workqueue_longest_running_processor_seconds{name="noexec_taint_pod"} 0 +workqueue_longest_running_processor_seconds{name="orphaned_pods_nodes"} 0 +workqueue_longest_running_processor_seconds{name="pvcprotection"} 0 +workqueue_longest_running_processor_seconds{name="pvcs"} 0 +workqueue_longest_running_processor_seconds{name="pvprotection"} 0 +workqueue_longest_running_processor_seconds{name="replicaset"} 0 +workqueue_longest_running_processor_seconds{name="replicationmanager"} 0 +workqueue_longest_running_processor_seconds{name="resource_quota_controller_resource_changes"} 0 +workqueue_longest_running_processor_seconds{name="resourcequota_primary"} 0 +workqueue_longest_running_processor_seconds{name="resourcequota_priority"} 0 +workqueue_longest_running_processor_seconds{name="root_ca_cert_publisher"} 0 
+workqueue_longest_running_processor_seconds{name="service"} 0 +workqueue_longest_running_processor_seconds{name="serviceaccount"} 0 +workqueue_longest_running_processor_seconds{name="serviceaccount_tokens_secret"} 0 +workqueue_longest_running_processor_seconds{name="serviceaccount_tokens_service"} 0 +workqueue_longest_running_processor_seconds{name="stale_pod_disruption"} 0 +workqueue_longest_running_processor_seconds{name="statefulset"} 0 +workqueue_longest_running_processor_seconds{name="token_cleaner"} 0 +workqueue_longest_running_processor_seconds{name="ttl_jobs_to_delete"} 0 +workqueue_longest_running_processor_seconds{name="ttlcontroller"} 0 +workqueue_longest_running_processor_seconds{name="volume_expand"} 0 +workqueue_longest_running_processor_seconds{name="volumes"} 0 +# HELP workqueue_queue_duration_seconds [ALPHA] How long in seconds an item stays in workqueue before being requested. +# TYPE workqueue_queue_duration_seconds histogram +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="9.999999999999999e-06"} 1 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="9.999999999999999e-05"} 7 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.001"} 11 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.01"} 12 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.1"} 19 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="1"} 19 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="10"} 19 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="+Inf"} 19 +workqueue_queue_duration_seconds_sum{name="ClusterRoleAggregator"} 0.29662182300000006 
+workqueue_queue_duration_seconds_count{name="ClusterRoleAggregator"} 19 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="9.999999999999999e-05"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.001"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.01"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.1"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="10"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="+Inf"} 2 +workqueue_queue_duration_seconds_sum{name="DynamicCABundle-client-ca-bundle"} 0.00010036499999999999 +workqueue_queue_duration_seconds_count{name="DynamicCABundle-client-ca-bundle"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="9.999999999999999e-05"} 4 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.001"} 5 
+workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.01"} 8 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.1"} 8 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1"} 8 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="10"} 8 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="+Inf"} 8 +workqueue_queue_duration_seconds_sum{name="DynamicCABundle-csr-controller"} 0.005034077 +workqueue_queue_duration_seconds_count{name="DynamicCABundle-csr-controller"} 8 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="9.999999999999999e-05"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.001"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.01"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.1"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="10"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="+Inf"} 2 +workqueue_queue_duration_seconds_sum{name="DynamicCABundle-request-header"} 0.00012161999999999999 +workqueue_queue_duration_seconds_count{name="DynamicCABundle-request-header"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-08"} 0 
+workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-05"} 37 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.001"} 38 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.01"} 38 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.1"} 38 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1"} 38 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="10"} 38 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="+Inf"} 38 +workqueue_queue_duration_seconds_sum{name="DynamicServingCertificateController"} 0.0008584600000000002 +workqueue_queue_duration_seconds_count{name="DynamicServingCertificateController"} 38 +workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="0.001"} 1 +workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="0.01"} 1 +workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="0.1"} 1 +workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="1"} 1 
+workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="10"} 1 +workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="+Inf"} 2 +workqueue_queue_duration_seconds_sum{name="bootstrap_signer_queue"} 11.600700546 +workqueue_queue_duration_seconds_count{name="bootstrap_signer_queue"} 2 +workqueue_queue_duration_seconds_bucket{name="certificate",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="certificate",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="certificate",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="certificate",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="certificate",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="certificate",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="certificate",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="certificate",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="certificate",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="certificate",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="certificate",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="certificate"} 0 +workqueue_queue_duration_seconds_count{name="certificate"} 0 +workqueue_queue_duration_seconds_bucket{name="claims",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="claims",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="claims",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="claims",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="claims",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="claims",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="claims",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="claims",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="claims",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="claims",le="10"} 0 
+workqueue_queue_duration_seconds_bucket{name="claims",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="claims"} 0 +workqueue_queue_duration_seconds_count{name="claims"} 0 +workqueue_queue_duration_seconds_bucket{name="cronjob",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="cronjob",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="cronjob",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="cronjob",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="cronjob",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="cronjob",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="cronjob",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="cronjob",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="cronjob",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="cronjob",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="cronjob",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="cronjob"} 0 +workqueue_queue_duration_seconds_count{name="cronjob"} 0 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="9.999999999999999e-06"} 8 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="9.999999999999999e-05"} 8 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="0.001"} 12 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="0.01"} 12 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="0.1"} 17 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="1"} 17 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="10"} 17 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="+Inf"} 17 +workqueue_queue_duration_seconds_sum{name="daemonset"} 0.22901414399999995 
+workqueue_queue_duration_seconds_count{name="daemonset"} 17 +workqueue_queue_duration_seconds_bucket{name="deployment",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="deployment",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="deployment",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="deployment",le="9.999999999999999e-06"} 5 +workqueue_queue_duration_seconds_bucket{name="deployment",le="9.999999999999999e-05"} 12 +workqueue_queue_duration_seconds_bucket{name="deployment",le="0.001"} 14 +workqueue_queue_duration_seconds_bucket{name="deployment",le="0.01"} 14 +workqueue_queue_duration_seconds_bucket{name="deployment",le="0.1"} 20 +workqueue_queue_duration_seconds_bucket{name="deployment",le="1"} 20 +workqueue_queue_duration_seconds_bucket{name="deployment",le="10"} 20 +workqueue_queue_duration_seconds_bucket{name="deployment",le="+Inf"} 20 +workqueue_queue_duration_seconds_sum{name="deployment"} 0.160956561 +workqueue_queue_duration_seconds_count{name="deployment"} 20 +workqueue_queue_duration_seconds_bucket{name="disruption",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="disruption"} 0 +workqueue_queue_duration_seconds_count{name="disruption"} 0 
+workqueue_queue_duration_seconds_bucket{name="disruption_recheck",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption_recheck",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption_recheck",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption_recheck",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption_recheck",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption_recheck",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption_recheck",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption_recheck",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption_recheck",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption_recheck",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="disruption_recheck",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="disruption_recheck"} 0 +workqueue_queue_duration_seconds_count{name="disruption_recheck"} 0 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="9.999999999999999e-06"} 4 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="9.999999999999999e-05"} 4 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="0.001"} 4 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="0.01"} 4 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="0.1"} 6 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="1"} 6 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="10"} 6 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="+Inf"} 6 +workqueue_queue_duration_seconds_sum{name="endpoint"} 0.16378205300000004 +workqueue_queue_duration_seconds_count{name="endpoint"} 6 
+workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="9.999999999999999e-06"} 4 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="9.999999999999999e-05"} 5 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="0.001"} 5 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="0.01"} 5 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="0.1"} 5 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="1"} 7 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="10"} 7 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="+Inf"} 7 +workqueue_queue_duration_seconds_sum{name="endpoint_slice"} 0.359466593 +workqueue_queue_duration_seconds_count{name="endpoint_slice"} 7 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="9.999999999999999e-06"} 2 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="9.999999999999999e-05"} 3 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.001"} 3 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.01"} 3 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.1"} 3 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1"} 4 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="10"} 4 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="+Inf"} 4 
+workqueue_queue_duration_seconds_sum{name="endpoint_slice_mirroring"} 0.100958319 +workqueue_queue_duration_seconds_count{name="endpoint_slice_mirroring"} 4 +workqueue_queue_duration_seconds_bucket{name="ephemeral_volume",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="ephemeral_volume",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="ephemeral_volume",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="ephemeral_volume",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="ephemeral_volume",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="ephemeral_volume",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="ephemeral_volume",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="ephemeral_volume",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="ephemeral_volume",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="ephemeral_volume",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="ephemeral_volume",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="ephemeral_volume"} 0 +workqueue_queue_duration_seconds_count{name="ephemeral_volume"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="0.1"} 0 
+workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="1"} 1 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="10"} 1 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="+Inf"} 1 +workqueue_queue_duration_seconds_sum{name="garbage_collector_attempt_to_delete"} 0.636190164 +workqueue_queue_duration_seconds_count{name="garbage_collector_attempt_to_delete"} 1 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="garbage_collector_attempt_to_orphan"} 0 +workqueue_queue_duration_seconds_count{name="garbage_collector_attempt_to_orphan"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1e-07"} 0 
+workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="9.999999999999999e-06"} 2629 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="9.999999999999999e-05"} 2927 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.001"} 2979 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.01"} 2979 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.1"} 2979 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1"} 2979 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="10"} 2979 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="+Inf"} 2979 +workqueue_queue_duration_seconds_sum{name="garbage_collector_graph_changes"} 0.032251662000000084 +workqueue_queue_duration_seconds_count{name="garbage_collector_graph_changes"} 2979 +workqueue_queue_duration_seconds_bucket{name="horizontalpodautoscaler",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="horizontalpodautoscaler",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="horizontalpodautoscaler",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="horizontalpodautoscaler",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="horizontalpodautoscaler",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="horizontalpodautoscaler",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="horizontalpodautoscaler",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="horizontalpodautoscaler",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="horizontalpodautoscaler",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="horizontalpodautoscaler",le="10"} 0 
+workqueue_queue_duration_seconds_bucket{name="horizontalpodautoscaler",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="horizontalpodautoscaler"} 0 +workqueue_queue_duration_seconds_count{name="horizontalpodautoscaler"} 0 +workqueue_queue_duration_seconds_bucket{name="job",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="job",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="job",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="job",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="job",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="job",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="job",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="job",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="job",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="job",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="job",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="job"} 0 +workqueue_queue_duration_seconds_count{name="job"} 0 +workqueue_queue_duration_seconds_bucket{name="job_orphan_pod",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="job_orphan_pod",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="job_orphan_pod",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="job_orphan_pod",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="job_orphan_pod",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="job_orphan_pod",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="job_orphan_pod",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="job_orphan_pod",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="job_orphan_pod",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="job_orphan_pod",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="job_orphan_pod",le="+Inf"} 0 
+workqueue_queue_duration_seconds_sum{name="job_orphan_pod"} 0 +workqueue_queue_duration_seconds_count{name="job_orphan_pod"} 0 +workqueue_queue_duration_seconds_bucket{name="namespace",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="namespace",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="namespace",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="namespace",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="namespace",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="namespace",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="namespace",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="namespace",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="namespace",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="namespace",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="namespace",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="namespace"} 0 +workqueue_queue_duration_seconds_count{name="namespace"} 0 +workqueue_queue_duration_seconds_bucket{name="node",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="node",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="node",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="node",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="node",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="node",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="node",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="node",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="node",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="node",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="node",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="node"} 0 +workqueue_queue_duration_seconds_count{name="node"} 0 
+workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="9.999999999999999e-06"} 9 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="9.999999999999999e-05"} 10 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="0.001"} 11 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="0.01"} 11 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="0.1"} 12 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="1"} 12 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="10"} 12 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="+Inf"} 12 +workqueue_queue_duration_seconds_sum{name="node_lifecycle_controller"} 0.09350184499999999 +workqueue_queue_duration_seconds_count{name="node_lifecycle_controller"} 12 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="9.999999999999999e-06"} 4 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="9.999999999999999e-05"} 6 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.001"} 6 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.01"} 6 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.1"} 10 
+workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1"} 10 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="10"} 10 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="+Inf"} 10 +workqueue_queue_duration_seconds_sum{name="node_lifecycle_controller_pods"} 0.374331337 +workqueue_queue_duration_seconds_count{name="node_lifecycle_controller_pods"} 10 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="0.1"} 1 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="1"} 1 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="10"} 1 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="+Inf"} 1 +workqueue_queue_duration_seconds_sum{name="noexec_taint_node"} 0.068335756 +workqueue_queue_duration_seconds_count{name="noexec_taint_node"} 1 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="9.999999999999999e-06"} 9 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="9.999999999999999e-05"} 12 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="0.001"} 12 
+workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="0.01"} 12 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="0.1"} 16 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="1"} 16 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="10"} 16 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="+Inf"} 16 +workqueue_queue_duration_seconds_sum{name="noexec_taint_pod"} 0.275766187 +workqueue_queue_duration_seconds_count{name="noexec_taint_pod"} 16 +workqueue_queue_duration_seconds_bucket{name="orphaned_pods_nodes",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="orphaned_pods_nodes",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="orphaned_pods_nodes",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="orphaned_pods_nodes",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="orphaned_pods_nodes",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="orphaned_pods_nodes",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="orphaned_pods_nodes",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="orphaned_pods_nodes",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="orphaned_pods_nodes",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="orphaned_pods_nodes",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="orphaned_pods_nodes",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="orphaned_pods_nodes"} 0 +workqueue_queue_duration_seconds_count{name="orphaned_pods_nodes"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcprotection",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcprotection",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcprotection",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcprotection",le="9.999999999999999e-06"} 0 
+workqueue_queue_duration_seconds_bucket{name="pvcprotection",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcprotection",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcprotection",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcprotection",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcprotection",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcprotection",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcprotection",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="pvcprotection"} 0 +workqueue_queue_duration_seconds_count{name="pvcprotection"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcs",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcs",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcs",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcs",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcs",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcs",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcs",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcs",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcs",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcs",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="pvcs",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="pvcs"} 0 +workqueue_queue_duration_seconds_count{name="pvcs"} 0 +workqueue_queue_duration_seconds_bucket{name="pvprotection",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="pvprotection",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="pvprotection",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="pvprotection",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="pvprotection",le="9.999999999999999e-05"} 0 
+workqueue_queue_duration_seconds_bucket{name="pvprotection",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="pvprotection",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="pvprotection",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="pvprotection",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="pvprotection",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="pvprotection",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="pvprotection"} 0 +workqueue_queue_duration_seconds_count{name="pvprotection"} 0 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="9.999999999999999e-06"} 9 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="9.999999999999999e-05"} 12 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="0.001"} 14 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="0.01"} 14 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="0.1"} 18 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="1"} 18 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="10"} 18 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="+Inf"} 18 +workqueue_queue_duration_seconds_sum{name="replicaset"} 0.16475134600000005 +workqueue_queue_duration_seconds_count{name="replicaset"} 18 +workqueue_queue_duration_seconds_bucket{name="replicationmanager",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="replicationmanager",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="replicationmanager",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="replicationmanager",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="replicationmanager",le="9.999999999999999e-05"} 0 
+workqueue_queue_duration_seconds_bucket{name="replicationmanager",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="replicationmanager",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="replicationmanager",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="replicationmanager",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="replicationmanager",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="replicationmanager",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="replicationmanager"} 0 +workqueue_queue_duration_seconds_count{name="replicationmanager"} 0 +workqueue_queue_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="resource_quota_controller_resource_changes"} 0 +workqueue_queue_duration_seconds_count{name="resource_quota_controller_resource_changes"} 0 
+workqueue_queue_duration_seconds_bucket{name="resourcequota_primary",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_primary",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_primary",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_primary",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_primary",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_primary",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_primary",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_primary",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_primary",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_primary",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_primary",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="resourcequota_primary"} 0 +workqueue_queue_duration_seconds_count{name="resourcequota_primary"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_priority",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_priority",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_priority",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_priority",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_priority",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_priority",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_priority",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_priority",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_priority",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="resourcequota_priority",le="10"} 0 
+workqueue_queue_duration_seconds_bucket{name="resourcequota_priority",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="resourcequota_priority"} 0 +workqueue_queue_duration_seconds_count{name="resourcequota_priority"} 0 +workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="0.1"} 1 +workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="1"} 5 +workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="10"} 5 +workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="+Inf"} 5 +workqueue_queue_duration_seconds_sum{name="root_ca_cert_publisher"} 2.900853618 +workqueue_queue_duration_seconds_count{name="root_ca_cert_publisher"} 5 +workqueue_queue_duration_seconds_bucket{name="service",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="service",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="service",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="service",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="service",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="service",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="service",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="service",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="service",le="1"} 0 
+workqueue_queue_duration_seconds_bucket{name="service",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="service",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="service"} 0 +workqueue_queue_duration_seconds_count{name="service"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="0.01"} 1 +workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="0.1"} 1 +workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="1"} 5 +workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="10"} 5 +workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="+Inf"} 5 +workqueue_queue_duration_seconds_sum{name="serviceaccount"} 0.876180307 +workqueue_queue_duration_seconds_count{name="serviceaccount"} 5 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="0.01"} 0 
+workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="serviceaccount_tokens_secret"} 0 +workqueue_queue_duration_seconds_count{name="serviceaccount_tokens_secret"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="9.999999999999999e-06"} 28 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="9.999999999999999e-05"} 39 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="0.001"} 39 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="0.01"} 40 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="0.1"} 43 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1"} 43 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="10"} 43 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="+Inf"} 43 +workqueue_queue_duration_seconds_sum{name="serviceaccount_tokens_service"} 0.14209613899999995 +workqueue_queue_duration_seconds_count{name="serviceaccount_tokens_service"} 43 +workqueue_queue_duration_seconds_bucket{name="stale_pod_disruption",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="stale_pod_disruption",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="stale_pod_disruption",le="1e-06"} 0 
+workqueue_queue_duration_seconds_bucket{name="stale_pod_disruption",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="stale_pod_disruption",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="stale_pod_disruption",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="stale_pod_disruption",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="stale_pod_disruption",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="stale_pod_disruption",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="stale_pod_disruption",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="stale_pod_disruption",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="stale_pod_disruption"} 0 +workqueue_queue_duration_seconds_count{name="stale_pod_disruption"} 0 +workqueue_queue_duration_seconds_bucket{name="statefulset",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="statefulset",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="statefulset",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="statefulset",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="statefulset",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="statefulset",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="statefulset",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="statefulset",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="statefulset",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="statefulset",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="statefulset",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="statefulset"} 0 +workqueue_queue_duration_seconds_count{name="statefulset"} 0 +workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="1e-07"} 0 
+workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="9.999999999999999e-06"} 1 +workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="9.999999999999999e-05"} 1 +workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="0.001"} 1 +workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="0.01"} 1 +workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="0.1"} 1 +workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="1"} 1 +workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="10"} 1 +workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="+Inf"} 1 +workqueue_queue_duration_seconds_sum{name="token_cleaner"} 6.897e-06 +workqueue_queue_duration_seconds_count{name="token_cleaner"} 1 +workqueue_queue_duration_seconds_bucket{name="ttl_jobs_to_delete",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="ttl_jobs_to_delete",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="ttl_jobs_to_delete",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="ttl_jobs_to_delete",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="ttl_jobs_to_delete",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="ttl_jobs_to_delete",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="ttl_jobs_to_delete",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="ttl_jobs_to_delete",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="ttl_jobs_to_delete",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="ttl_jobs_to_delete",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="ttl_jobs_to_delete",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="ttl_jobs_to_delete"} 0 +workqueue_queue_duration_seconds_count{name="ttl_jobs_to_delete"} 0 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="1e-08"} 0 
+workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="9.999999999999999e-06"} 9 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="9.999999999999999e-05"} 10 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="0.001"} 10 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="0.01"} 10 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="0.1"} 11 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="1"} 12 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="10"} 12 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="+Inf"} 12 +workqueue_queue_duration_seconds_sum{name="ttlcontroller"} 0.776090998 +workqueue_queue_duration_seconds_count{name="ttlcontroller"} 12 +workqueue_queue_duration_seconds_bucket{name="volume_expand",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="volume_expand",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="volume_expand",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="volume_expand",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="volume_expand",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="volume_expand",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="volume_expand",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="volume_expand",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="volume_expand",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="volume_expand",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="volume_expand",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="volume_expand"} 0 +workqueue_queue_duration_seconds_count{name="volume_expand"} 0 +workqueue_queue_duration_seconds_bucket{name="volumes",le="1e-08"} 0 
+workqueue_queue_duration_seconds_bucket{name="volumes",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="volumes",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="volumes",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="volumes",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="volumes",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="volumes",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="volumes",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="volumes",le="1"} 0 +workqueue_queue_duration_seconds_bucket{name="volumes",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="volumes",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="volumes"} 0 +workqueue_queue_duration_seconds_count{name="volumes"} 0 +# HELP workqueue_retries_total [ALPHA] Total number of retries handled by workqueue +# TYPE workqueue_retries_total counter +workqueue_retries_total{name="ClusterRoleAggregator"} 0 +workqueue_retries_total{name="DynamicCABundle-client-ca-bundle"} 0 +workqueue_retries_total{name="DynamicCABundle-csr-controller"} 0 +workqueue_retries_total{name="DynamicCABundle-request-header"} 0 +workqueue_retries_total{name="DynamicServingCertificateController"} 0 +workqueue_retries_total{name="bootstrap_signer_queue"} 0 +workqueue_retries_total{name="certificate"} 0 +workqueue_retries_total{name="cronjob"} 0 +workqueue_retries_total{name="daemonset"} 0 +workqueue_retries_total{name="deployment"} 9 +workqueue_retries_total{name="disruption"} 0 +workqueue_retries_total{name="disruption_recheck"} 0 +workqueue_retries_total{name="endpoint"} 4 +workqueue_retries_total{name="endpoint_slice"} 6 +workqueue_retries_total{name="endpoint_slice_mirroring"} 0 +workqueue_retries_total{name="ephemeral_volume"} 0 +workqueue_retries_total{name="garbage_collector_attempt_to_delete"} 0 +workqueue_retries_total{name="garbage_collector_attempt_to_orphan"} 0 
+workqueue_retries_total{name="garbage_collector_graph_changes"} 0 +workqueue_retries_total{name="horizontalpodautoscaler"} 0 +workqueue_retries_total{name="job"} 0 +workqueue_retries_total{name="job_orphan_pod"} 0 +workqueue_retries_total{name="namespace"} 0 +workqueue_retries_total{name="node"} 0 +workqueue_retries_total{name="node_lifecycle_controller_pods"} 0 +workqueue_retries_total{name="orphaned_pods_nodes"} 0 +workqueue_retries_total{name="pvcprotection"} 0 +workqueue_retries_total{name="pvcs"} 0 +workqueue_retries_total{name="pvprotection"} 0 +workqueue_retries_total{name="replicaset"} 0 +workqueue_retries_total{name="replicationmanager"} 0 +workqueue_retries_total{name="resource_quota_controller_resource_changes"} 0 +workqueue_retries_total{name="resourcequota_primary"} 0 +workqueue_retries_total{name="resourcequota_priority"} 0 +workqueue_retries_total{name="root_ca_cert_publisher"} 0 +workqueue_retries_total{name="service"} 0 +workqueue_retries_total{name="serviceaccount"} 0 +workqueue_retries_total{name="serviceaccount_tokens_secret"} 0 +workqueue_retries_total{name="serviceaccount_tokens_service"} 0 +workqueue_retries_total{name="stale_pod_disruption"} 0 +workqueue_retries_total{name="statefulset"} 0 +workqueue_retries_total{name="token_cleaner"} 1 +workqueue_retries_total{name="ttl_jobs_to_delete"} 0 +workqueue_retries_total{name="ttlcontroller"} 0 +workqueue_retries_total{name="volume_expand"} 0 +# HELP workqueue_unfinished_work_seconds [ALPHA] How many seconds of work has done that is in progress and hasn't been observed by work_duration. Large values indicate stuck threads. One can deduce the number of stuck threads by observing the rate at which this increases. 
+# TYPE workqueue_unfinished_work_seconds gauge +workqueue_unfinished_work_seconds{name="ClusterRoleAggregator"} 0 +workqueue_unfinished_work_seconds{name="DynamicCABundle-client-ca-bundle"} 0 +workqueue_unfinished_work_seconds{name="DynamicCABundle-csr-controller"} 0 +workqueue_unfinished_work_seconds{name="DynamicCABundle-request-header"} 0 +workqueue_unfinished_work_seconds{name="DynamicServingCertificateController"} 0 +workqueue_unfinished_work_seconds{name="bootstrap_signer_queue"} 0 +workqueue_unfinished_work_seconds{name="certificate"} 0 +workqueue_unfinished_work_seconds{name="claims"} 0 +workqueue_unfinished_work_seconds{name="cronjob"} 0 +workqueue_unfinished_work_seconds{name="daemonset"} 0 +workqueue_unfinished_work_seconds{name="deployment"} 0 +workqueue_unfinished_work_seconds{name="disruption"} 0 +workqueue_unfinished_work_seconds{name="disruption_recheck"} 0 +workqueue_unfinished_work_seconds{name="endpoint"} 0 +workqueue_unfinished_work_seconds{name="endpoint_slice"} 0 +workqueue_unfinished_work_seconds{name="endpoint_slice_mirroring"} 0 +workqueue_unfinished_work_seconds{name="ephemeral_volume"} 0 +workqueue_unfinished_work_seconds{name="garbage_collector_attempt_to_delete"} 0 +workqueue_unfinished_work_seconds{name="garbage_collector_attempt_to_orphan"} 0 +workqueue_unfinished_work_seconds{name="garbage_collector_graph_changes"} 0 +workqueue_unfinished_work_seconds{name="horizontalpodautoscaler"} 0 +workqueue_unfinished_work_seconds{name="job"} 0 +workqueue_unfinished_work_seconds{name="job_orphan_pod"} 0 +workqueue_unfinished_work_seconds{name="namespace"} 0 +workqueue_unfinished_work_seconds{name="node"} 0 +workqueue_unfinished_work_seconds{name="node_lifecycle_controller"} 0 +workqueue_unfinished_work_seconds{name="node_lifecycle_controller_pods"} 0 +workqueue_unfinished_work_seconds{name="noexec_taint_node"} 0 +workqueue_unfinished_work_seconds{name="noexec_taint_pod"} 0 +workqueue_unfinished_work_seconds{name="orphaned_pods_nodes"} 0 
+workqueue_unfinished_work_seconds{name="pvcprotection"} 0 +workqueue_unfinished_work_seconds{name="pvcs"} 0 +workqueue_unfinished_work_seconds{name="pvprotection"} 0 +workqueue_unfinished_work_seconds{name="replicaset"} 0 +workqueue_unfinished_work_seconds{name="replicationmanager"} 0 +workqueue_unfinished_work_seconds{name="resource_quota_controller_resource_changes"} 0 +workqueue_unfinished_work_seconds{name="resourcequota_primary"} 0 +workqueue_unfinished_work_seconds{name="resourcequota_priority"} 0 +workqueue_unfinished_work_seconds{name="root_ca_cert_publisher"} 0 +workqueue_unfinished_work_seconds{name="service"} 0 +workqueue_unfinished_work_seconds{name="serviceaccount"} 0 +workqueue_unfinished_work_seconds{name="serviceaccount_tokens_secret"} 0 +workqueue_unfinished_work_seconds{name="serviceaccount_tokens_service"} 0 +workqueue_unfinished_work_seconds{name="stale_pod_disruption"} 0 +workqueue_unfinished_work_seconds{name="statefulset"} 0 +workqueue_unfinished_work_seconds{name="token_cleaner"} 0 +workqueue_unfinished_work_seconds{name="ttl_jobs_to_delete"} 0 +workqueue_unfinished_work_seconds{name="ttlcontroller"} 0 +workqueue_unfinished_work_seconds{name="volume_expand"} 0 +workqueue_unfinished_work_seconds{name="volumes"} 0 +# HELP workqueue_work_duration_seconds [ALPHA] How long in seconds processing an item from workqueue takes. 
+# TYPE workqueue_work_duration_seconds histogram +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="9.999999999999999e-05"} 1 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.001"} 12 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.01"} 12 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.1"} 16 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="1"} 19 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="10"} 19 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="+Inf"} 19 +workqueue_work_duration_seconds_sum{name="ClusterRoleAggregator"} 2.4599846950000006 +workqueue_work_duration_seconds_count{name="ClusterRoleAggregator"} 19 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="9.999999999999999e-05"} 1 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.001"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.01"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.1"} 2 
+workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="10"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="+Inf"} 2 +workqueue_work_duration_seconds_sum{name="DynamicCABundle-client-ca-bundle"} 0.000147692 +workqueue_work_duration_seconds_count{name="DynamicCABundle-client-ca-bundle"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.001"} 3 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.01"} 8 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.1"} 8 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1"} 8 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="10"} 8 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="+Inf"} 8 +workqueue_work_duration_seconds_sum{name="DynamicCABundle-csr-controller"} 0.008532069 +workqueue_work_duration_seconds_count{name="DynamicCABundle-csr-controller"} 8 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1e-06"} 0 
+workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="9.999999999999999e-05"} 1 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.001"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.01"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.1"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="10"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="+Inf"} 2 +workqueue_work_duration_seconds_sum{name="DynamicCABundle-request-header"} 0.000143932 +workqueue_work_duration_seconds_count{name="DynamicCABundle-request-header"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-05"} 38 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.001"} 38 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.01"} 38 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.1"} 38 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1"} 38 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="10"} 38 
+workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="+Inf"} 38 +workqueue_work_duration_seconds_sum{name="DynamicServingCertificateController"} 0.0014076110000000002 +workqueue_work_duration_seconds_count{name="DynamicServingCertificateController"} 38 +workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="0.001"} 1 +workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="0.01"} 1 +workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="0.1"} 1 +workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="1"} 2 +workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="10"} 2 +workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="+Inf"} 2 +workqueue_work_duration_seconds_sum{name="bootstrap_signer_queue"} 0.562168307 +workqueue_work_duration_seconds_count{name="bootstrap_signer_queue"} 2 +workqueue_work_duration_seconds_bucket{name="certificate",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="certificate",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="certificate",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="certificate",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="certificate",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="certificate",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="certificate",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="certificate",le="0.1"} 0 
+workqueue_work_duration_seconds_bucket{name="certificate",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="certificate",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="certificate",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="certificate"} 0 +workqueue_work_duration_seconds_count{name="certificate"} 0 +workqueue_work_duration_seconds_bucket{name="claims",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="claims",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="claims",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="claims",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="claims",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="claims",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="claims",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="claims",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="claims",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="claims",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="claims",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="claims"} 0 +workqueue_work_duration_seconds_count{name="claims"} 0 +workqueue_work_duration_seconds_bucket{name="cronjob",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="cronjob",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="cronjob",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="cronjob",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="cronjob",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="cronjob",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="cronjob",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="cronjob",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="cronjob",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="cronjob",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="cronjob",le="+Inf"} 0 
+workqueue_work_duration_seconds_sum{name="cronjob"} 0 +workqueue_work_duration_seconds_count{name="cronjob"} 0 +workqueue_work_duration_seconds_bucket{name="daemonset",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="daemonset",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="daemonset",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="daemonset",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="daemonset",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="daemonset",le="0.001"} 9 +workqueue_work_duration_seconds_bucket{name="daemonset",le="0.01"} 11 +workqueue_work_duration_seconds_bucket{name="daemonset",le="0.1"} 15 +workqueue_work_duration_seconds_bucket{name="daemonset",le="1"} 17 +workqueue_work_duration_seconds_bucket{name="daemonset",le="10"} 17 +workqueue_work_duration_seconds_bucket{name="daemonset",le="+Inf"} 17 +workqueue_work_duration_seconds_sum{name="daemonset"} 1.9677030530000001 +workqueue_work_duration_seconds_count{name="daemonset"} 17 +workqueue_work_duration_seconds_bucket{name="deployment",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="deployment",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="deployment",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="deployment",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="deployment",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="deployment",le="0.001"} 10 +workqueue_work_duration_seconds_bucket{name="deployment",le="0.01"} 11 +workqueue_work_duration_seconds_bucket{name="deployment",le="0.1"} 18 +workqueue_work_duration_seconds_bucket{name="deployment",le="1"} 20 +workqueue_work_duration_seconds_bucket{name="deployment",le="10"} 20 +workqueue_work_duration_seconds_bucket{name="deployment",le="+Inf"} 20 +workqueue_work_duration_seconds_sum{name="deployment"} 1.680529541 +workqueue_work_duration_seconds_count{name="deployment"} 
20 +workqueue_work_duration_seconds_bucket{name="disruption",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="disruption",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="disruption",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="disruption",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="disruption",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="disruption",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="disruption",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="disruption",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="disruption",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="disruption",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="disruption",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="disruption"} 0 +workqueue_work_duration_seconds_count{name="disruption"} 0 +workqueue_work_duration_seconds_bucket{name="disruption_recheck",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="disruption_recheck",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="disruption_recheck",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="disruption_recheck",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="disruption_recheck",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="disruption_recheck",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="disruption_recheck",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="disruption_recheck",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="disruption_recheck",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="disruption_recheck",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="disruption_recheck",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="disruption_recheck"} 0 +workqueue_work_duration_seconds_count{name="disruption_recheck"} 0 
+workqueue_work_duration_seconds_bucket{name="endpoint",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="endpoint",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="endpoint",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="endpoint",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="endpoint",le="9.999999999999999e-05"} 3 +workqueue_work_duration_seconds_bucket{name="endpoint",le="0.001"} 3 +workqueue_work_duration_seconds_bucket{name="endpoint",le="0.01"} 3 +workqueue_work_duration_seconds_bucket{name="endpoint",le="0.1"} 5 +workqueue_work_duration_seconds_bucket{name="endpoint",le="1"} 6 +workqueue_work_duration_seconds_bucket{name="endpoint",le="10"} 6 +workqueue_work_duration_seconds_bucket{name="endpoint",le="+Inf"} 6 +workqueue_work_duration_seconds_sum{name="endpoint"} 0.878932805 +workqueue_work_duration_seconds_count{name="endpoint"} 6 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="9.999999999999999e-05"} 1 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="0.001"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="0.01"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="0.1"} 6 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="1"} 7 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="10"} 7 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="+Inf"} 7 +workqueue_work_duration_seconds_sum{name="endpoint_slice"} 0.895027736 +workqueue_work_duration_seconds_count{name="endpoint_slice"} 7 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1e-08"} 0 
+workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="9.999999999999999e-05"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.001"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.01"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.1"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="10"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="+Inf"} 4 +workqueue_work_duration_seconds_sum{name="endpoint_slice_mirroring"} 0.000188079 +workqueue_work_duration_seconds_count{name="endpoint_slice_mirroring"} 4 +workqueue_work_duration_seconds_bucket{name="ephemeral_volume",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="ephemeral_volume",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="ephemeral_volume",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="ephemeral_volume",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="ephemeral_volume",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="ephemeral_volume",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="ephemeral_volume",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="ephemeral_volume",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="ephemeral_volume",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="ephemeral_volume",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="ephemeral_volume",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="ephemeral_volume"} 0 
+workqueue_work_duration_seconds_count{name="ephemeral_volume"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="9.999999999999999e-05"} 1 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="0.001"} 1 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="0.01"} 1 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="0.1"} 1 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="1"} 1 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="10"} 1 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="+Inf"} 1 +workqueue_work_duration_seconds_sum{name="garbage_collector_attempt_to_delete"} 6.8602e-05 +workqueue_work_duration_seconds_count{name="garbage_collector_attempt_to_delete"} 1 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="0.001"} 0 
+workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="garbage_collector_attempt_to_orphan"} 0 +workqueue_work_duration_seconds_count{name="garbage_collector_attempt_to_orphan"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="9.999999999999999e-06"} 665 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="9.999999999999999e-05"} 2971 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.001"} 2979 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.01"} 2979 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.1"} 2979 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1"} 2979 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="10"} 2979 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="+Inf"} 2979 +workqueue_work_duration_seconds_sum{name="garbage_collector_graph_changes"} 0.04551231999999994 +workqueue_work_duration_seconds_count{name="garbage_collector_graph_changes"} 2979 +workqueue_work_duration_seconds_bucket{name="horizontalpodautoscaler",le="1e-08"} 0 
+workqueue_work_duration_seconds_bucket{name="horizontalpodautoscaler",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="horizontalpodautoscaler",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="horizontalpodautoscaler",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="horizontalpodautoscaler",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="horizontalpodautoscaler",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="horizontalpodautoscaler",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="horizontalpodautoscaler",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="horizontalpodautoscaler",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="horizontalpodautoscaler",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="horizontalpodautoscaler",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="horizontalpodautoscaler"} 0 +workqueue_work_duration_seconds_count{name="horizontalpodautoscaler"} 0 +workqueue_work_duration_seconds_bucket{name="job",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="job",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="job",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="job",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="job",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="job",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="job",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="job",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="job",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="job",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="job",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="job"} 0 +workqueue_work_duration_seconds_count{name="job"} 0 +workqueue_work_duration_seconds_bucket{name="job_orphan_pod",le="1e-08"} 0 
+workqueue_work_duration_seconds_bucket{name="job_orphan_pod",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="job_orphan_pod",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="job_orphan_pod",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="job_orphan_pod",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="job_orphan_pod",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="job_orphan_pod",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="job_orphan_pod",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="job_orphan_pod",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="job_orphan_pod",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="job_orphan_pod",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="job_orphan_pod"} 0 +workqueue_work_duration_seconds_count{name="job_orphan_pod"} 0 +workqueue_work_duration_seconds_bucket{name="namespace",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="namespace",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="namespace",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="namespace",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="namespace",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="namespace",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="namespace",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="namespace",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="namespace",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="namespace",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="namespace",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="namespace"} 0 +workqueue_work_duration_seconds_count{name="namespace"} 0 +workqueue_work_duration_seconds_bucket{name="node",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="node",le="1e-07"} 0 
+workqueue_work_duration_seconds_bucket{name="node",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="node",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="node",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="node",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="node",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="node",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="node",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="node",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="node",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="node"} 0 +workqueue_work_duration_seconds_count{name="node"} 0 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="9.999999999999999e-06"} 2 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="9.999999999999999e-05"} 11 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="0.001"} 11 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="0.01"} 11 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="0.1"} 12 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="1"} 12 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="10"} 12 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="+Inf"} 12 +workqueue_work_duration_seconds_sum{name="node_lifecycle_controller"} 0.020806040000000005 +workqueue_work_duration_seconds_count{name="node_lifecycle_controller"} 12 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1e-08"} 0 
+workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="9.999999999999999e-06"} 1 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="9.999999999999999e-05"} 7 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.001"} 10 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.01"} 10 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.1"} 10 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1"} 10 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="10"} 10 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="+Inf"} 10 +workqueue_work_duration_seconds_sum{name="node_lifecycle_controller_pods"} 0.0005371999999999999 +workqueue_work_duration_seconds_count{name="node_lifecycle_controller_pods"} 10 +workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="9.999999999999999e-05"} 1 +workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="0.001"} 1 +workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="0.01"} 1 +workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="0.1"} 1 +workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="1"} 1 +workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="10"} 1 
+workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="+Inf"} 1 +workqueue_work_duration_seconds_sum{name="noexec_taint_node"} 4.2807e-05 +workqueue_work_duration_seconds_count{name="noexec_taint_node"} 1 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="9.999999999999999e-05"} 14 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="0.001"} 16 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="0.01"} 16 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="0.1"} 16 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="1"} 16 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="10"} 16 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="+Inf"} 16 +workqueue_work_duration_seconds_sum{name="noexec_taint_pod"} 0.0005546540000000001 +workqueue_work_duration_seconds_count{name="noexec_taint_pod"} 16 +workqueue_work_duration_seconds_bucket{name="orphaned_pods_nodes",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="orphaned_pods_nodes",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="orphaned_pods_nodes",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="orphaned_pods_nodes",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="orphaned_pods_nodes",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="orphaned_pods_nodes",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="orphaned_pods_nodes",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="orphaned_pods_nodes",le="0.1"} 0 
+workqueue_work_duration_seconds_bucket{name="orphaned_pods_nodes",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="orphaned_pods_nodes",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="orphaned_pods_nodes",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="orphaned_pods_nodes"} 0 +workqueue_work_duration_seconds_count{name="orphaned_pods_nodes"} 0 +workqueue_work_duration_seconds_bucket{name="pvcprotection",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="pvcprotection",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="pvcprotection",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="pvcprotection",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="pvcprotection",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="pvcprotection",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="pvcprotection",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="pvcprotection",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="pvcprotection",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="pvcprotection",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="pvcprotection",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="pvcprotection"} 0 +workqueue_work_duration_seconds_count{name="pvcprotection"} 0 +workqueue_work_duration_seconds_bucket{name="pvcs",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="pvcs",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="pvcs",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="pvcs",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="pvcs",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="pvcs",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="pvcs",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="pvcs",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="pvcs",le="1"} 0 
+workqueue_work_duration_seconds_bucket{name="pvcs",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="pvcs",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="pvcs"} 0 +workqueue_work_duration_seconds_count{name="pvcs"} 0 +workqueue_work_duration_seconds_bucket{name="pvprotection",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="pvprotection",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="pvprotection",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="pvprotection",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="pvprotection",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="pvprotection",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="pvprotection",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="pvprotection",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="pvprotection",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="pvprotection",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="pvprotection",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="pvprotection"} 0 +workqueue_work_duration_seconds_count{name="pvprotection"} 0 +workqueue_work_duration_seconds_bucket{name="replicaset",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="replicaset",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="replicaset",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="replicaset",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="replicaset",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="replicaset",le="0.001"} 11 +workqueue_work_duration_seconds_bucket{name="replicaset",le="0.01"} 11 +workqueue_work_duration_seconds_bucket{name="replicaset",le="0.1"} 16 +workqueue_work_duration_seconds_bucket{name="replicaset",le="1"} 18 +workqueue_work_duration_seconds_bucket{name="replicaset",le="10"} 18 
+workqueue_work_duration_seconds_bucket{name="replicaset",le="+Inf"} 18 +workqueue_work_duration_seconds_sum{name="replicaset"} 0.941117998 +workqueue_work_duration_seconds_count{name="replicaset"} 18 +workqueue_work_duration_seconds_bucket{name="replicationmanager",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="replicationmanager",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="replicationmanager",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="replicationmanager",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="replicationmanager",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="replicationmanager",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="replicationmanager",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="replicationmanager",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="replicationmanager",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="replicationmanager",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="replicationmanager",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="replicationmanager"} 0 +workqueue_work_duration_seconds_count{name="replicationmanager"} 0 +workqueue_work_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="0.01"} 0 
+workqueue_work_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="resource_quota_controller_resource_changes",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="resource_quota_controller_resource_changes"} 0 +workqueue_work_duration_seconds_count{name="resource_quota_controller_resource_changes"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_primary",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_primary",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_primary",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_primary",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_primary",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_primary",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_primary",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_primary",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_primary",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_primary",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_primary",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="resourcequota_primary"} 0 +workqueue_work_duration_seconds_count{name="resourcequota_primary"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_priority",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_priority",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_priority",le="1e-06"} 0 
+workqueue_work_duration_seconds_bucket{name="resourcequota_priority",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_priority",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_priority",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_priority",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_priority",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_priority",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_priority",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="resourcequota_priority",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="resourcequota_priority"} 0 +workqueue_work_duration_seconds_count{name="resourcequota_priority"} 0 +workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="0.01"} 2 +workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="0.1"} 4 +workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="1"} 5 +workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="10"} 5 +workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="+Inf"} 5 +workqueue_work_duration_seconds_sum{name="root_ca_cert_publisher"} 0.715673963 +workqueue_work_duration_seconds_count{name="root_ca_cert_publisher"} 5 +workqueue_work_duration_seconds_bucket{name="service",le="1e-08"} 
0 +workqueue_work_duration_seconds_bucket{name="service",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="service",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="service",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="service",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="service",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="service",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="service",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="service",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="service",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="service",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="service"} 0 +workqueue_work_duration_seconds_count{name="service"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount",le="0.01"} 1 +workqueue_work_duration_seconds_bucket{name="serviceaccount",le="0.1"} 4 +workqueue_work_duration_seconds_bucket{name="serviceaccount",le="1"} 5 +workqueue_work_duration_seconds_bucket{name="serviceaccount",le="10"} 5 +workqueue_work_duration_seconds_bucket{name="serviceaccount",le="+Inf"} 5 +workqueue_work_duration_seconds_sum{name="serviceaccount"} 0.24347626800000002 +workqueue_work_duration_seconds_count{name="serviceaccount"} 5 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="1e-08"} 0 
+workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="serviceaccount_tokens_secret"} 0 +workqueue_work_duration_seconds_count{name="serviceaccount_tokens_secret"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="9.999999999999999e-06"} 12 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="9.999999999999999e-05"} 43 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="0.001"} 43 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="0.01"} 43 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="0.1"} 43 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1"} 43 
+workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="10"} 43 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="+Inf"} 43 +workqueue_work_duration_seconds_sum{name="serviceaccount_tokens_service"} 0.000620875 +workqueue_work_duration_seconds_count{name="serviceaccount_tokens_service"} 43 +workqueue_work_duration_seconds_bucket{name="stale_pod_disruption",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="stale_pod_disruption",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="stale_pod_disruption",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="stale_pod_disruption",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="stale_pod_disruption",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="stale_pod_disruption",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="stale_pod_disruption",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="stale_pod_disruption",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="stale_pod_disruption",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="stale_pod_disruption",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="stale_pod_disruption",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="stale_pod_disruption"} 0 +workqueue_work_duration_seconds_count{name="stale_pod_disruption"} 0 +workqueue_work_duration_seconds_bucket{name="statefulset",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="statefulset",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="statefulset",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="statefulset",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="statefulset",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="statefulset",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="statefulset",le="0.01"} 0 
+workqueue_work_duration_seconds_bucket{name="statefulset",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="statefulset",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="statefulset",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="statefulset",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="statefulset"} 0 +workqueue_work_duration_seconds_count{name="statefulset"} 0 +workqueue_work_duration_seconds_bucket{name="token_cleaner",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="token_cleaner",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="token_cleaner",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="token_cleaner",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="token_cleaner",le="9.999999999999999e-05"} 1 +workqueue_work_duration_seconds_bucket{name="token_cleaner",le="0.001"} 1 +workqueue_work_duration_seconds_bucket{name="token_cleaner",le="0.01"} 1 +workqueue_work_duration_seconds_bucket{name="token_cleaner",le="0.1"} 1 +workqueue_work_duration_seconds_bucket{name="token_cleaner",le="1"} 1 +workqueue_work_duration_seconds_bucket{name="token_cleaner",le="10"} 1 +workqueue_work_duration_seconds_bucket{name="token_cleaner",le="+Inf"} 1 +workqueue_work_duration_seconds_sum{name="token_cleaner"} 5.1242e-05 +workqueue_work_duration_seconds_count{name="token_cleaner"} 1 +workqueue_work_duration_seconds_bucket{name="ttl_jobs_to_delete",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="ttl_jobs_to_delete",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="ttl_jobs_to_delete",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="ttl_jobs_to_delete",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="ttl_jobs_to_delete",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="ttl_jobs_to_delete",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="ttl_jobs_to_delete",le="0.01"} 0 
+workqueue_work_duration_seconds_bucket{name="ttl_jobs_to_delete",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="ttl_jobs_to_delete",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="ttl_jobs_to_delete",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="ttl_jobs_to_delete",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="ttl_jobs_to_delete"} 0 +workqueue_work_duration_seconds_count{name="ttl_jobs_to_delete"} 0 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="9.999999999999999e-06"} 6 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="9.999999999999999e-05"} 11 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="0.001"} 11 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="0.01"} 11 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="0.1"} 11 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="1"} 12 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="10"} 12 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="+Inf"} 12 +workqueue_work_duration_seconds_sum{name="ttlcontroller"} 0.7475187799999997 +workqueue_work_duration_seconds_count{name="ttlcontroller"} 12 +workqueue_work_duration_seconds_bucket{name="volume_expand",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="volume_expand",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="volume_expand",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="volume_expand",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="volume_expand",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="volume_expand",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="volume_expand",le="0.01"} 0 
+workqueue_work_duration_seconds_bucket{name="volume_expand",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="volume_expand",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="volume_expand",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="volume_expand",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="volume_expand"} 0 +workqueue_work_duration_seconds_count{name="volume_expand"} 0 +workqueue_work_duration_seconds_bucket{name="volumes",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="volumes",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="volumes",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="volumes",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="volumes",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="volumes",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="volumes",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="volumes",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="volumes",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="volumes",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="volumes",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="volumes"} 0 +workqueue_work_duration_seconds_count{name="volumes"} 0 diff --git a/metricbeat/module/kubernetes/controllermanager/_meta/test/metrics.1.29.expected b/metricbeat/module/kubernetes/controllermanager/_meta/test/metrics.1.29.expected new file mode 100644 index 000000000000..69d7921256b5 --- /dev/null +++ b/metricbeat/module/kubernetes/controllermanager/_meta/test/metrics.1.29.expected @@ -0,0 +1,2269 @@ +[ + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "endpoint_slice_mirroring", + "workqueue": { + "adds": { + "count": 4 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": 
"", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "job", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "ClusterRoleAggregator", + "workqueue": { + "adds": { + "count": 19 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "replicationmanager", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "duration": { + "us": { + "bucket": { + "+Inf": 84, + "100000": 64, + "1000000": 84, + "15000000": 84, + "2000000": 84, + "25000": 46, + "250000": 66, + "30000000": 84, + "4000000": 84, + "5000": 10, + "500000": 77, + "60000000": 84, + "8000000": 84 + }, + "count": 84, + "sum": 
11851414.454 + } + }, + "size": { + "bytes": { + "bucket": { + "+Inf": 84, + "1024": 70, + "1048576": 84, + "16384": 84, + "16777216": 84, + "256": 55, + "262144": 84, + "4096": 84, + "4194304": 84, + "512": 66, + "64": 1, + "65536": 84 + }, + "count": 84, + "sum": 34927 + } + } + }, + "response": { + "size": { + "bytes": { + "bucket": { + "+Inf": 84, + "1024": 51, + "1048576": 84, + "16384": 84, + "16777216": 84, + "256": 38, + "262144": 84, + "4096": 82, + "4194304": 84, + "512": 39, + "64": 0, + "65536": 84 + }, + "count": 84, + "sum": 78889 + } + } + } + }, + "host": "172.18.0.2:6443", + "verb": "POST" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "count": 3 + } + }, + "code": "409", + "host": "172.18.0.2:6443", + "method": "PUT" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "deployment", + "workqueue": { + "adds": { + "count": 20 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 9 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "bootstrap_signer_queue", + "workqueue": { + "adds": { + "count": 2 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + 
"ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "claims", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "count": 1125 + } + }, + "code": "200", + "host": "172.18.0.2:6443", + "method": "PUT" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "node_lifecycle_controller_pods", + "workqueue": { + "adds": { + "count": 10 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "cronjob", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + 
"RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "count": 10 + } + }, + "code": "200", + "host": "172.18.0.2:6443", + "method": "PATCH" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "noexec_taint_pod", + "workqueue": { + "adds": { + "count": 16 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "disruption_recheck", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "node": { + "collector": { + "count": 1, + "eviction": { + "count": 0 + }, + "health": { + "pct": 100 + }, + "unhealthy": { + "count": 0 + } + } + }, + "process": { + "cpu": { + "sec": 58 + }, + "fds": { + "max": { + "count": 1048576 + }, + "open": { + "count": 18 + } + }, + "memory": { + "resident": { + "bytes": 109260800 + }, + "virtual": { + "bytes": 1337397248 + } + }, + "started": { + "sec": 1704894767.11 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + 
"DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "disruption", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "pvcs", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "count": 84 + } + }, + "code": "201", + "host": "172.18.0.2:6443", + "method": "POST" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "DynamicCABundle-request-header", + "workqueue": { + "adds": { + "count": 2 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "endpoint_slice", + "workqueue": { + 
"adds": { + "count": 7 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 6 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "certificate", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "node_lifecycle_controller", + "workqueue": { + "adds": { + "count": 12 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "statefulset", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "garbage_collector_graph_changes", + "workqueue": { + "adds": { + "count": 2979 + }, + "depth": { + "count": 0 + }, + 
"longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "leader": { + "is_master": true + }, + "name": "kube-controller-manager" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "root_ca_cert_publisher", + "workqueue": { + "adds": { + "count": 5 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "node", + "workqueue": { + "adds": { + "count": 1 + }, + "depth": { + "count": 1 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "endpoint", + "workqueue": { + "adds": { + "count": 6 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 4 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": 
null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "duration": { + "us": { + "bucket": { + "+Inf": 10, + "100000": 6, + "1000000": 10, + "15000000": 10, + "2000000": 10, + "25000": 4, + "250000": 6, + "30000000": 10, + "4000000": 10, + "5000": 0, + "500000": 6, + "60000000": 10, + "8000000": 10 + }, + "count": 10, + "sum": 3209190.2879999997 + } + }, + "size": { + "bytes": { + "bucket": { + "+Inf": 10, + "1024": 4, + "1048576": 10, + "16384": 10, + "16777216": 10, + "256": 3, + "262144": 10, + "4096": 10, + "4194304": 10, + "512": 4, + "64": 1, + "65536": 10 + }, + "count": 10, + "sum": 18531 + } + } + }, + "response": { + "size": { + "bytes": { + "bucket": { + "+Inf": 10, + "1024": 1, + "1048576": 10, + "16384": 10, + "16777216": 10, + "256": 0, + "262144": 10, + "4096": 10, + "4194304": 10, + "512": 0, + "64": 0, + "65536": 10 + }, + "count": 10, + "sum": 28839 + } + } + } + }, + "host": "172.18.0.2:6443", + "verb": "PATCH" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "ttl_jobs_to_delete", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "serviceaccount_tokens_secret", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + 
"sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "job_orphan_pod", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "volume_expand", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "ephemeral_volume", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "daemonset", + "workqueue": { + "adds": { + "count": 17 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": 
{ + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "count": 34 + } + }, + "code": "404", + "host": "172.18.0.2:6443", + "method": "GET" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "garbage_collector_attempt_to_orphan", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "duration": { + "us": { + "bucket": { + "+Inf": 1128, + "100000": 1125, + "1000000": 1128, + "15000000": 1128, + "2000000": 1128, + "25000": 1114, + "250000": 1127, + "30000000": 1128, + "4000000": 1128, + "5000": 1, + "500000": 1127, + "60000000": 1128, + "8000000": 1128 + }, + "count": 1128, + "sum": 15549394.524000004 + } + }, + "size": { + "bytes": { + "bucket": { + "+Inf": 1128, + "1024": 1104, + "1048576": 1128, + "16384": 1128, + "16777216": 1128, + "256": 0, + "262144": 1128, + "4096": 1128, + "4194304": 1128, + "512": 1099, + "64": 0, + "65536": 1128 + }, + "count": 1128, + "sum": 534678 + } + } + }, + "response": { + "size": { + "bytes": { + "bucket": { + "+Inf": 1128, + "1024": 1103, + "1048576": 1128, + "16384": 1128, + "16777216": 1128, + "256": 1, + "262144": 
1128, + "4096": 1123, + "4194304": 1128, + "512": 1101, + "64": 0, + "65536": 1128 + }, + "count": 1128, + "sum": 578751 + } + } + } + }, + "host": "172.18.0.2:6443", + "verb": "PUT" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "serviceaccount_tokens_service", + "workqueue": { + "adds": { + "count": 43 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "token_cleaner", + "workqueue": { + "adds": { + "count": 1 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 1 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "DynamicServingCertificateController", + "workqueue": { + "adds": { + "count": 38 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "stale_pod_disruption", + "workqueue": { + 
"adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "resource_quota_controller_resource_changes", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "serviceaccount", + "workqueue": { + "adds": { + "count": 5 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "namespace", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "horizontalpodautoscaler", + "workqueue": { + "adds": { + "count": 0 + }, 
+ "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "replicaset", + "workqueue": { + "adds": { + "count": 18 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "pvprotection", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "count": 1756 + } + }, + "code": "200", + "host": "172.18.0.2:6443", + "method": "GET" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "duration": { + "us": { + "bucket": { + "+Inf": 1546, + "100000": 1523, + "1000000": 1545, + "15000000": 1546, + "2000000": 1545, + "25000": 1482, + "250000": 1531, + "30000000": 1546, + "4000000": 1546, 
+ "5000": 1414, + "500000": 1543, + "60000000": 1546, + "8000000": 1546 + }, + "count": 1546, + "sum": 16967002.752999995 + } + }, + "size": { + "bytes": { + "bucket": { + "+Inf": 1546, + "1024": 1546, + "1048576": 1546, + "16384": 1546, + "16777216": 1546, + "256": 1546, + "262144": 1546, + "4096": 1546, + "4194304": 1546, + "512": 1546, + "64": 1546, + "65536": 1546 + }, + "count": 1546, + "sum": 0 + } + } + }, + "response": { + "size": { + "bytes": { + "bucket": { + "+Inf": 1546, + "1024": 1226, + "1048576": 1546, + "16384": 1543, + "16777216": 1546, + "256": 88, + "262144": 1546, + "4096": 1233, + "4194304": 1546, + "512": 1223, + "64": 21, + "65536": 1546 + }, + "count": 1546, + "sum": 4165150 + } + } + } + }, + "host": "172.18.0.2:6443", + "verb": "GET" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "volumes", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "noexec_taint_node", + "workqueue": { + "adds": { + "count": 1 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "pvcprotection", + "workqueue": { + "adds": { + 
"count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "resourcequota_primary", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "garbage_collector_attempt_to_delete", + "workqueue": { + "adds": { + "count": 1 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "count": 1 + } + }, + "code": "403", + "host": "172.18.0.2:6443", + "method": "GET" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "resourcequota_priority", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + 
"unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "ttlcontroller", + "workqueue": { + "adds": { + "count": 12 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "orphaned_pods_nodes", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "DynamicCABundle-csr-controller", + "workqueue": { + "adds": { + "count": 8 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "service", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + 
"Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "DynamicCABundle-client-ca-bundle", + "workqueue": { + "adds": { + "count": 2 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + } +] \ No newline at end of file diff --git a/metricbeat/module/kubernetes/controllermanager/_meta/testdata/docs.plain b/metricbeat/module/kubernetes/controllermanager/_meta/testdata/docs.plain index 8641f0404027..7ef3f5e465b2 100644 --- a/metricbeat/module/kubernetes/controllermanager/_meta/testdata/docs.plain +++ b/metricbeat/module/kubernetes/controllermanager/_meta/testdata/docs.plain @@ -1,6 +1,21 @@ # HELP aggregator_discovery_aggregation_count_total [ALPHA] Counter of number of times discovery was aggregated # TYPE aggregator_discovery_aggregation_count_total counter aggregator_discovery_aggregation_count_total 0 +# HELP apiextensions_apiserver_validation_ratcheting_seconds [ALPHA] Time for comparison of old to new for the purposes of CRDValidationRatcheting during an UPDATE in seconds. 
+# TYPE apiextensions_apiserver_validation_ratcheting_seconds histogram +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="1e-05"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="4e-05"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="0.00016"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="0.00064"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="0.00256"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="0.01024"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="0.04096"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="0.16384"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="0.65536"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="2.62144"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_bucket{le="+Inf"} 0 +apiextensions_apiserver_validation_ratcheting_seconds_sum 0 +apiextensions_apiserver_validation_ratcheting_seconds_count 0 # HELP apiserver_audit_event_total [ALPHA] Counter of audit events generated and sent to the audit backend. # TYPE apiserver_audit_event_total counter apiserver_audit_event_total 0 @@ -69,7 +84,7 @@ apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="3"} 1 apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="5"} 1 apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="10"} 1 apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="+Inf"} 1 -apiserver_delegated_authn_request_duration_seconds_sum{code="201"} 0.013829824 +apiserver_delegated_authn_request_duration_seconds_sum{code="201"} 0.015306199 apiserver_delegated_authn_request_duration_seconds_count{code="201"} 1 # HELP apiserver_delegated_authn_request_total [ALPHA] Number of HTTP requests partitioned by status code. 
# TYPE apiserver_delegated_authn_request_total counter @@ -85,7 +100,7 @@ apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="3"} 1 apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="5"} 1 apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="10"} 1 apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="+Inf"} 1 -apiserver_delegated_authz_request_duration_seconds_sum{code="201"} 0.002822977 +apiserver_delegated_authz_request_duration_seconds_sum{code="201"} 0.003696981 apiserver_delegated_authz_request_duration_seconds_count{code="201"} 1 # HELP apiserver_delegated_authz_request_total [ALPHA] Number of HTTP requests partitioned by status code. # TYPE apiserver_delegated_authz_request_total counter @@ -126,30 +141,30 @@ apiserver_webhooks_x509_insecure_sha1_total 0 apiserver_webhooks_x509_missing_san_total 0 # HELP authenticated_user_requests [ALPHA] Counter of authenticated requests broken out by username. # TYPE authenticated_user_requests counter -authenticated_user_requests{username="other"} 10 +authenticated_user_requests{username="other"} 221 # HELP authentication_attempts [ALPHA] Counter of authenticated attempts. # TYPE authentication_attempts counter -authentication_attempts{result="success"} 10 +authentication_attempts{result="success"} 221 # HELP authentication_duration_seconds [ALPHA] Authentication duration in seconds broken out by result. 
# TYPE authentication_duration_seconds histogram -authentication_duration_seconds_bucket{result="success",le="0.001"} 10 -authentication_duration_seconds_bucket{result="success",le="0.002"} 10 -authentication_duration_seconds_bucket{result="success",le="0.004"} 10 -authentication_duration_seconds_bucket{result="success",le="0.008"} 10 -authentication_duration_seconds_bucket{result="success",le="0.016"} 10 -authentication_duration_seconds_bucket{result="success",le="0.032"} 10 -authentication_duration_seconds_bucket{result="success",le="0.064"} 10 -authentication_duration_seconds_bucket{result="success",le="0.128"} 10 -authentication_duration_seconds_bucket{result="success",le="0.256"} 10 -authentication_duration_seconds_bucket{result="success",le="0.512"} 10 -authentication_duration_seconds_bucket{result="success",le="1.024"} 10 -authentication_duration_seconds_bucket{result="success",le="2.048"} 10 -authentication_duration_seconds_bucket{result="success",le="4.096"} 10 -authentication_duration_seconds_bucket{result="success",le="8.192"} 10 -authentication_duration_seconds_bucket{result="success",le="16.384"} 10 -authentication_duration_seconds_bucket{result="success",le="+Inf"} 10 -authentication_duration_seconds_sum{result="success"} 0.00037915399999999993 -authentication_duration_seconds_count{result="success"} 10 +authentication_duration_seconds_bucket{result="success",le="0.001"} 221 +authentication_duration_seconds_bucket{result="success",le="0.002"} 221 +authentication_duration_seconds_bucket{result="success",le="0.004"} 221 +authentication_duration_seconds_bucket{result="success",le="0.008"} 221 +authentication_duration_seconds_bucket{result="success",le="0.016"} 221 +authentication_duration_seconds_bucket{result="success",le="0.032"} 221 +authentication_duration_seconds_bucket{result="success",le="0.064"} 221 +authentication_duration_seconds_bucket{result="success",le="0.128"} 221 +authentication_duration_seconds_bucket{result="success",le="0.256"} 221 
+authentication_duration_seconds_bucket{result="success",le="0.512"} 221 +authentication_duration_seconds_bucket{result="success",le="1.024"} 221 +authentication_duration_seconds_bucket{result="success",le="2.048"} 221 +authentication_duration_seconds_bucket{result="success",le="4.096"} 221 +authentication_duration_seconds_bucket{result="success",le="8.192"} 221 +authentication_duration_seconds_bucket{result="success",le="16.384"} 221 +authentication_duration_seconds_bucket{result="success",le="+Inf"} 221 +authentication_duration_seconds_sum{result="success"} 0.010239524999999996 +authentication_duration_seconds_count{result="success"} 221 # HELP authentication_token_cache_active_fetch_count [ALPHA] # TYPE authentication_token_cache_active_fetch_count gauge authentication_token_cache_active_fetch_count{status="blocked"} 0 @@ -171,34 +186,37 @@ authentication_token_cache_request_duration_seconds_bucket{status="miss",le="2.5 authentication_token_cache_request_duration_seconds_bucket{status="miss",le="5"} 1 authentication_token_cache_request_duration_seconds_bucket{status="miss",le="10"} 1 authentication_token_cache_request_duration_seconds_bucket{status="miss",le="+Inf"} 1 -authentication_token_cache_request_duration_seconds_sum{status="miss"} 0.014 +authentication_token_cache_request_duration_seconds_sum{status="miss"} 0.015 authentication_token_cache_request_duration_seconds_count{status="miss"} 1 # HELP authentication_token_cache_request_total [ALPHA] # TYPE authentication_token_cache_request_total counter authentication_token_cache_request_total{status="miss"} 1 # HELP authorization_attempts_total [ALPHA] Counter of authorization attempts broken down by result. It can be either 'allowed', 'denied', 'no-opinion' or 'error'. 
# TYPE authorization_attempts_total counter -authorization_attempts_total{result="allowed"} 10 +authorization_attempts_total{result="allowed"} 221 # HELP authorization_duration_seconds [ALPHA] Authorization duration in seconds broken out by result. # TYPE authorization_duration_seconds histogram -authorization_duration_seconds_bucket{result="allowed",le="0.001"} 10 -authorization_duration_seconds_bucket{result="allowed",le="0.002"} 10 -authorization_duration_seconds_bucket{result="allowed",le="0.004"} 10 -authorization_duration_seconds_bucket{result="allowed",le="0.008"} 10 -authorization_duration_seconds_bucket{result="allowed",le="0.016"} 10 -authorization_duration_seconds_bucket{result="allowed",le="0.032"} 10 -authorization_duration_seconds_bucket{result="allowed",le="0.064"} 10 -authorization_duration_seconds_bucket{result="allowed",le="0.128"} 10 -authorization_duration_seconds_bucket{result="allowed",le="0.256"} 10 -authorization_duration_seconds_bucket{result="allowed",le="0.512"} 10 -authorization_duration_seconds_bucket{result="allowed",le="1.024"} 10 -authorization_duration_seconds_bucket{result="allowed",le="2.048"} 10 -authorization_duration_seconds_bucket{result="allowed",le="4.096"} 10 -authorization_duration_seconds_bucket{result="allowed",le="8.192"} 10 -authorization_duration_seconds_bucket{result="allowed",le="16.384"} 10 -authorization_duration_seconds_bucket{result="allowed",le="+Inf"} 10 -authorization_duration_seconds_sum{result="allowed"} 7.873299999999999e-05 -authorization_duration_seconds_count{result="allowed"} 10 +authorization_duration_seconds_bucket{result="allowed",le="0.001"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.002"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.004"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.008"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.016"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.032"} 221 
+authorization_duration_seconds_bucket{result="allowed",le="0.064"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.128"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.256"} 221 +authorization_duration_seconds_bucket{result="allowed",le="0.512"} 221 +authorization_duration_seconds_bucket{result="allowed",le="1.024"} 221 +authorization_duration_seconds_bucket{result="allowed",le="2.048"} 221 +authorization_duration_seconds_bucket{result="allowed",le="4.096"} 221 +authorization_duration_seconds_bucket{result="allowed",le="8.192"} 221 +authorization_duration_seconds_bucket{result="allowed",le="16.384"} 221 +authorization_duration_seconds_bucket{result="allowed",le="+Inf"} 221 +authorization_duration_seconds_sum{result="allowed"} 0.001831606000000001 +authorization_duration_seconds_count{result="allowed"} 221 +# HELP cardinality_enforcement_unexpected_categorizations_total [ALPHA] The count of unexpected categorizations during cardinality enforcement. +# TYPE cardinality_enforcement_unexpected_categorizations_total counter +cardinality_enforcement_unexpected_categorizations_total 0 # HELP cronjob_controller_job_creation_skew_duration_seconds [STABLE] Time between when a cronjob is scheduled to be run, and when the corresponding job is created # TYPE cronjob_controller_job_creation_skew_duration_seconds histogram cronjob_controller_job_creation_skew_duration_seconds_bucket{le="1"} 0 @@ -220,95 +238,95 @@ disabled_metrics_total 0 # HELP endpoint_slice_controller_changes [ALPHA] Number of EndpointSlice changes # TYPE endpoint_slice_controller_changes counter endpoint_slice_controller_changes{operation="create"} 1 -endpoint_slice_controller_changes{operation="update"} 4 +endpoint_slice_controller_changes{operation="update"} 2 # HELP endpoint_slice_controller_desired_endpoint_slices [ALPHA] Number of EndpointSlices that would exist with perfect endpoint allocation # TYPE endpoint_slice_controller_desired_endpoint_slices gauge 
endpoint_slice_controller_desired_endpoint_slices 1 # HELP endpoint_slice_controller_endpoints_added_per_sync [ALPHA] Number of endpoints added on each Service sync # TYPE endpoint_slice_controller_endpoints_added_per_sync histogram -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="2"} 9 -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="4"} 9 -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="8"} 9 -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="16"} 9 -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="32"} 9 -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="64"} 9 -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="128"} 9 -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="256"} 9 -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="512"} 9 -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="1024"} 9 -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="2048"} 9 -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="4096"} 9 -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="8192"} 9 -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="16384"} 9 -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="32768"} 9 -endpoint_slice_controller_endpoints_added_per_sync_bucket{le="+Inf"} 9 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="2"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="4"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="8"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="16"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="32"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="64"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="128"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="256"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="512"} 6 
+endpoint_slice_controller_endpoints_added_per_sync_bucket{le="1024"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="2048"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="4096"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="8192"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="16384"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="32768"} 6 +endpoint_slice_controller_endpoints_added_per_sync_bucket{le="+Inf"} 6 endpoint_slice_controller_endpoints_added_per_sync_sum 2 -endpoint_slice_controller_endpoints_added_per_sync_count 9 +endpoint_slice_controller_endpoints_added_per_sync_count 6 # HELP endpoint_slice_controller_endpoints_desired [ALPHA] Number of endpoints desired # TYPE endpoint_slice_controller_endpoints_desired gauge endpoint_slice_controller_endpoints_desired 2 # HELP endpoint_slice_controller_endpoints_removed_per_sync [ALPHA] Number of endpoints removed on each Service sync # TYPE endpoint_slice_controller_endpoints_removed_per_sync histogram -endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="2"} 9 -endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="4"} 9 -endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="8"} 9 -endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="16"} 9 -endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="32"} 9 -endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="64"} 9 -endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="128"} 9 -endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="256"} 9 -endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="512"} 9 -endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="1024"} 9 -endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="2048"} 9 -endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="4096"} 9 
-endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="8192"} 9 -endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="16384"} 9 -endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="32768"} 9 -endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="+Inf"} 9 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="2"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="4"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="8"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="16"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="32"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="64"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="128"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="256"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="512"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="1024"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="2048"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="4096"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="8192"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="16384"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="32768"} 6 +endpoint_slice_controller_endpoints_removed_per_sync_bucket{le="+Inf"} 6 endpoint_slice_controller_endpoints_removed_per_sync_sum 0 -endpoint_slice_controller_endpoints_removed_per_sync_count 9 +endpoint_slice_controller_endpoints_removed_per_sync_count 6 # HELP endpoint_slice_controller_endpointslices_changed_per_sync [ALPHA] Number of EndpointSlices changed on each Service sync # TYPE endpoint_slice_controller_endpointslices_changed_per_sync histogram -endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.005"} 4 
-endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.01"} 4 -endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.025"} 4 -endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.05"} 4 -endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.1"} 4 -endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.25"} 4 -endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.5"} 4 -endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="1"} 9 -endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="2.5"} 9 -endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="5"} 9 -endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="10"} 9 -endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="+Inf"} 9 -endpoint_slice_controller_endpointslices_changed_per_sync_sum{topology="Disabled"} 5 -endpoint_slice_controller_endpointslices_changed_per_sync_count{topology="Disabled"} 9 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.005"} 3 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.01"} 3 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.025"} 3 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.05"} 3 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.1"} 3 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.25"} 3 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="0.5"} 3 
+endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="1"} 6 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="2.5"} 6 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="5"} 6 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="10"} 6 +endpoint_slice_controller_endpointslices_changed_per_sync_bucket{topology="Disabled",le="+Inf"} 6 +endpoint_slice_controller_endpointslices_changed_per_sync_sum{topology="Disabled"} 3 +endpoint_slice_controller_endpointslices_changed_per_sync_count{topology="Disabled"} 6 # HELP endpoint_slice_controller_num_endpoint_slices [ALPHA] Number of EndpointSlices # TYPE endpoint_slice_controller_num_endpoint_slices gauge endpoint_slice_controller_num_endpoint_slices 1 # HELP endpoint_slice_controller_syncs [ALPHA] Number of EndpointSlice syncs # TYPE endpoint_slice_controller_syncs counter -endpoint_slice_controller_syncs{result="success"} 10 +endpoint_slice_controller_syncs{result="success"} 7 # HELP endpoint_slice_mirroring_controller_endpoints_sync_duration [ALPHA] Duration of syncEndpoints() in seconds # TYPE endpoint_slice_mirroring_controller_endpoints_sync_duration histogram -endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.001"} 6 -endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.002"} 6 -endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.004"} 6 -endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.008"} 6 -endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.016"} 6 -endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.032"} 6 -endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.064"} 6 -endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.128"} 6 
-endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.256"} 6 -endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.512"} 6 -endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="1.024"} 6 -endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="2.048"} 6 -endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="4.096"} 6 -endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="8.192"} 6 -endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="16.384"} 6 -endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="+Inf"} 6 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.001"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.002"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.004"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.008"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.016"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.032"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.064"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.128"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.256"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="0.512"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="1.024"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="2.048"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="4.096"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="8.192"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="16.384"} 4 +endpoint_slice_mirroring_controller_endpoints_sync_duration_bucket{le="+Inf"} 4 
endpoint_slice_mirroring_controller_endpoints_sync_duration_sum 0 -endpoint_slice_mirroring_controller_endpoints_sync_duration_count 6 +endpoint_slice_mirroring_controller_endpoints_sync_duration_count 4 # HELP ephemeral_volume_controller_create_failures_total [ALPHA] Number of PersistenVolumeClaims creation requests # TYPE ephemeral_volume_controller_create_failures_total counter ephemeral_volume_controller_create_failures_total 0 @@ -323,277 +341,352 @@ garbagecollector_controller_resources_sync_error_total 0 go_cgo_go_to_c_calls_calls_total 0 # HELP go_cpu_classes_gc_mark_assist_cpu_seconds_total Estimated total CPU time goroutines spent performing GC tasks to assist the GC and prevent it from falling behind the application. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_gc_mark_assist_cpu_seconds_total counter -go_cpu_classes_gc_mark_assist_cpu_seconds_total 0.013102469 -# HELP go_cpu_classes_gc_mark_dedicated_cpu_seconds_total Estimated total CPU time spent performing GC tasks on processors (as defined by GOMAXPROCS) dedicated to those tasks. This includes time spent with the world stopped due to the GC. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +go_cpu_classes_gc_mark_assist_cpu_seconds_total 0.009980877 +# HELP go_cpu_classes_gc_mark_dedicated_cpu_seconds_total Estimated total CPU time spent performing GC tasks on processors (as defined by GOMAXPROCS) dedicated to those tasks. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. 
# TYPE go_cpu_classes_gc_mark_dedicated_cpu_seconds_total counter -go_cpu_classes_gc_mark_dedicated_cpu_seconds_total 0.062076559 +go_cpu_classes_gc_mark_dedicated_cpu_seconds_total 0.615466956 # HELP go_cpu_classes_gc_mark_idle_cpu_seconds_total Estimated total CPU time spent performing GC tasks on spare CPU resources that the Go scheduler could not otherwise find a use for. This should be subtracted from the total GC CPU time to obtain a measure of compulsory GC CPU time. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_gc_mark_idle_cpu_seconds_total counter -go_cpu_classes_gc_mark_idle_cpu_seconds_total 0.066144316 +go_cpu_classes_gc_mark_idle_cpu_seconds_total 0.10748798 # HELP go_cpu_classes_gc_pause_cpu_seconds_total Estimated total CPU time spent with the application paused by the GC. Even if only one thread is running during the pause, this is computed as GOMAXPROCS times the pause latency because nothing else can be executing. This is the exact sum of samples in /gc/pause:seconds if each sample is multiplied by GOMAXPROCS at the time it is taken. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_gc_pause_cpu_seconds_total counter -go_cpu_classes_gc_pause_cpu_seconds_total 0.018310032 +go_cpu_classes_gc_pause_cpu_seconds_total 0.086410944 # HELP go_cpu_classes_gc_total_cpu_seconds_total Estimated total CPU time spent performing GC tasks. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. Sum of all metrics in /cpu/classes/gc. 
# TYPE go_cpu_classes_gc_total_cpu_seconds_total counter -go_cpu_classes_gc_total_cpu_seconds_total 0.159633376 +go_cpu_classes_gc_total_cpu_seconds_total 0.819346757 # HELP go_cpu_classes_idle_cpu_seconds_total Estimated total available CPU time not spent executing any Go or Go runtime code. In other words, the part of /cpu/classes/total:cpu-seconds that was unused. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_idle_cpu_seconds_total counter -go_cpu_classes_idle_cpu_seconds_total 279.099504409 +go_cpu_classes_idle_cpu_seconds_total 34257.660255826 # HELP go_cpu_classes_scavenge_assist_cpu_seconds_total Estimated total CPU time spent returning unused memory to the underlying platform in response eagerly in response to memory pressure. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_scavenge_assist_cpu_seconds_total counter -go_cpu_classes_scavenge_assist_cpu_seconds_total 8.38e-07 +go_cpu_classes_scavenge_assist_cpu_seconds_total 4.56e-07 # HELP go_cpu_classes_scavenge_background_cpu_seconds_total Estimated total CPU time spent performing background tasks to return unused memory to the underlying platform. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_scavenge_background_cpu_seconds_total counter -go_cpu_classes_scavenge_background_cpu_seconds_total 6.624e-06 +go_cpu_classes_scavenge_background_cpu_seconds_total 0.001950821 # HELP go_cpu_classes_scavenge_total_cpu_seconds_total Estimated total CPU time spent performing tasks that return unused memory to the underlying platform. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. 
Sum of all metrics in /cpu/classes/scavenge. # TYPE go_cpu_classes_scavenge_total_cpu_seconds_total counter -go_cpu_classes_scavenge_total_cpu_seconds_total 7.462e-06 +go_cpu_classes_scavenge_total_cpu_seconds_total 0.001951277 # HELP go_cpu_classes_total_cpu_seconds_total Estimated total available CPU time for user Go code or the Go runtime, as defined by GOMAXPROCS. In other words, GOMAXPROCS integrated over the wall-clock duration this process has been executing for. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. Sum of all metrics in /cpu/classes. # TYPE go_cpu_classes_total_cpu_seconds_total counter -go_cpu_classes_total_cpu_seconds_total 319.685244856 +go_cpu_classes_total_cpu_seconds_total 34309.383945808 # HELP go_cpu_classes_user_cpu_seconds_total Estimated total CPU time spent running user Go code. This may also include some small amount of time spent in the Go runtime. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_user_cpu_seconds_total counter -go_cpu_classes_user_cpu_seconds_total 40.426099609 +go_cpu_classes_user_cpu_seconds_total 50.902391948 # HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime. # TYPE go_gc_cycles_automatic_gc_cycles_total counter -go_gc_cycles_automatic_gc_cycles_total 13 +go_gc_cycles_automatic_gc_cycles_total 29 # HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application. # TYPE go_gc_cycles_forced_gc_cycles_total counter go_gc_cycles_forced_gc_cycles_total 0 # HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles. 
# TYPE go_gc_cycles_total_gc_cycles_total counter -go_gc_cycles_total_gc_cycles_total 13 +go_gc_cycles_total_gc_cycles_total 29 # HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. # TYPE go_gc_duration_seconds summary -go_gc_duration_seconds{quantile="0"} 7.1623e-05 -go_gc_duration_seconds{quantile="0.25"} 9.9297e-05 -go_gc_duration_seconds{quantile="0.5"} 0.000128816 -go_gc_duration_seconds{quantile="0.75"} 0.000277543 -go_gc_duration_seconds{quantile="1"} 0.000313554 -go_gc_duration_seconds_sum 0.002288754 -go_gc_duration_seconds_count 13 -# HELP go_gc_heap_allocs_by_size_bytes Distribution of heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. +go_gc_duration_seconds{quantile="0"} 7.1885e-05 +go_gc_duration_seconds{quantile="0.25"} 0.00010273 +go_gc_duration_seconds{quantile="0.5"} 0.000147269 +go_gc_duration_seconds{quantile="0.75"} 0.000227305 +go_gc_duration_seconds{quantile="1"} 0.000506175 +go_gc_duration_seconds_sum 0.005400684 +go_gc_duration_seconds_count 29 +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. +# TYPE go_gc_gogc_percent gauge +go_gc_gogc_percent 100 +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. +# TYPE go_gc_gomemlimit_bytes gauge +go_gc_gomemlimit_bytes 9.223372036854776e+18 +# HELP go_gc_heap_allocs_by_size_bytes Distribution of heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
# TYPE go_gc_heap_allocs_by_size_bytes histogram -go_gc_heap_allocs_by_size_bytes_bucket{le="8.999999999999998"} 18561 -go_gc_heap_allocs_by_size_bytes_bucket{le="24.999999999999996"} 216132 -go_gc_heap_allocs_by_size_bytes_bucket{le="64.99999999999999"} 354006 -go_gc_heap_allocs_by_size_bytes_bucket{le="144.99999999999997"} 434770 -go_gc_heap_allocs_by_size_bytes_bucket{le="320.99999999999994"} 501892 -go_gc_heap_allocs_by_size_bytes_bucket{le="704.9999999999999"} 513509 -go_gc_heap_allocs_by_size_bytes_bucket{le="1536.9999999999998"} 517316 -go_gc_heap_allocs_by_size_bytes_bucket{le="3200.9999999999995"} 518865 -go_gc_heap_allocs_by_size_bytes_bucket{le="6528.999999999999"} 521001 -go_gc_heap_allocs_by_size_bytes_bucket{le="13568.999999999998"} 521357 -go_gc_heap_allocs_by_size_bytes_bucket{le="27264.999999999996"} 521596 -go_gc_heap_allocs_by_size_bytes_bucket{le="+Inf"} 521745 -go_gc_heap_allocs_by_size_bytes_sum 7.3857112e+07 -go_gc_heap_allocs_by_size_bytes_count 521745 +go_gc_heap_allocs_by_size_bytes_bucket{le="8.999999999999998"} 20262 +go_gc_heap_allocs_by_size_bytes_bucket{le="24.999999999999996"} 662760 +go_gc_heap_allocs_by_size_bytes_bucket{le="64.99999999999999"} 1.23019e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="144.99999999999997"} 1.564055e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="320.99999999999994"} 1.67643e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="704.9999999999999"} 1.73359e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="1536.9999999999998"} 1.744467e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="3200.9999999999995"} 1.749584e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="6528.999999999999"} 1.752437e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="13568.999999999998"} 1.754058e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="27264.999999999996"} 1.754576e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="+Inf"} 1.754729e+06 +go_gc_heap_allocs_by_size_bytes_sum 1.95122176e+08 +go_gc_heap_allocs_by_size_bytes_count 1.754729e+06 # 
HELP go_gc_heap_allocs_bytes_total Cumulative sum of memory allocated to the heap by the application. # TYPE go_gc_heap_allocs_bytes_total counter -go_gc_heap_allocs_bytes_total 7.3857112e+07 +go_gc_heap_allocs_bytes_total 1.95122176e+08 # HELP go_gc_heap_allocs_objects_total Cumulative count of heap allocations triggered by the application. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. # TYPE go_gc_heap_allocs_objects_total counter -go_gc_heap_allocs_objects_total 521745 -# HELP go_gc_heap_frees_by_size_bytes Distribution of freed heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. +go_gc_heap_allocs_objects_total 1.754729e+06 +# HELP go_gc_heap_frees_by_size_bytes Distribution of freed heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
# TYPE go_gc_heap_frees_by_size_bytes histogram -go_gc_heap_frees_by_size_bytes_bucket{le="8.999999999999998"} 13919 -go_gc_heap_frees_by_size_bytes_bucket{le="24.999999999999996"} 152051 -go_gc_heap_frees_by_size_bytes_bucket{le="64.99999999999999"} 242855 -go_gc_heap_frees_by_size_bytes_bucket{le="144.99999999999997"} 299574 -go_gc_heap_frees_by_size_bytes_bucket{le="320.99999999999994"} 356406 -go_gc_heap_frees_by_size_bytes_bucket{le="704.9999999999999"} 363764 -go_gc_heap_frees_by_size_bytes_bucket{le="1536.9999999999998"} 366648 -go_gc_heap_frees_by_size_bytes_bucket{le="3200.9999999999995"} 367696 -go_gc_heap_frees_by_size_bytes_bucket{le="6528.999999999999"} 369595 -go_gc_heap_frees_by_size_bytes_bucket{le="13568.999999999998"} 369791 -go_gc_heap_frees_by_size_bytes_bucket{le="27264.999999999996"} 369848 -go_gc_heap_frees_by_size_bytes_bucket{le="+Inf"} 369898 -go_gc_heap_frees_by_size_bytes_sum 5.101808e+07 -go_gc_heap_frees_by_size_bytes_count 369898 +go_gc_heap_frees_by_size_bytes_bucket{le="8.999999999999998"} 16105 +go_gc_heap_frees_by_size_bytes_bucket{le="24.999999999999996"} 609148 +go_gc_heap_frees_by_size_bytes_bucket{le="64.99999999999999"} 1.131067e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="144.99999999999997"} 1.44066e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="320.99999999999994"} 1.543796e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="704.9999999999999"} 1.596558e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="1536.9999999999998"} 1.606603e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="3200.9999999999995"} 1.611257e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="6528.999999999999"} 1.613916e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="13568.999999999998"} 1.615381e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="27264.999999999996"} 1.615713e+06 +go_gc_heap_frees_by_size_bytes_bucket{le="+Inf"} 1.615775e+06 +go_gc_heap_frees_by_size_bytes_sum 1.73551056e+08 +go_gc_heap_frees_by_size_bytes_count 1.615775e+06 # HELP 
go_gc_heap_frees_bytes_total Cumulative sum of heap memory freed by the garbage collector. # TYPE go_gc_heap_frees_bytes_total counter -go_gc_heap_frees_bytes_total 5.101808e+07 +go_gc_heap_frees_bytes_total 1.73551056e+08 # HELP go_gc_heap_frees_objects_total Cumulative count of heap allocations whose storage was freed by the garbage collector. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. # TYPE go_gc_heap_frees_objects_total counter -go_gc_heap_frees_objects_total 369898 +go_gc_heap_frees_objects_total 1.615775e+06 # HELP go_gc_heap_goal_bytes Heap size target for the end of the GC cycle. # TYPE go_gc_heap_goal_bytes gauge -go_gc_heap_goal_bytes 3.5172488e+07 +go_gc_heap_goal_bytes 3.468156e+07 +# HELP go_gc_heap_live_bytes Heap memory occupied by live objects that were marked by the previous GC. +# TYPE go_gc_heap_live_bytes gauge +go_gc_heap_live_bytes 1.6361688e+07 # HELP go_gc_heap_objects_objects Number of objects, live or unswept, occupying heap memory. # TYPE go_gc_heap_objects_objects gauge -go_gc_heap_objects_objects 151847 +go_gc_heap_objects_objects 138954 # HELP go_gc_heap_tiny_allocs_objects_total Count of small allocations that are packed together into blocks. These allocations are counted separately from other allocations because each individual allocation is not tracked by the runtime, only their block. Each block is already accounted for in allocs-by-size and frees-by-size. # TYPE go_gc_heap_tiny_allocs_objects_total counter -go_gc_heap_tiny_allocs_objects_total 46491 +go_gc_heap_tiny_allocs_objects_total 198929 # HELP go_gc_limiter_last_enabled_gc_cycle GC cycle the last time the GC CPU limiter was enabled. This metric is useful for diagnosing the root cause of an out-of-memory error, because the limiter trades memory for CPU time when the GC's CPU time gets too high. This is most likely to occur with use of SetMemoryLimit. 
The first GC cycle is cycle 1, so a value of 0 indicates that it was never enabled. # TYPE go_gc_limiter_last_enabled_gc_cycle gauge go_gc_limiter_last_enabled_gc_cycle 0 -# HELP go_gc_pauses_seconds Distribution individual GC-related stop-the-world pause latencies. +# HELP go_gc_pauses_seconds Distribution of individual GC-related stop-the-world pause latencies. Bucket counts increase monotonically. # TYPE go_gc_pauses_seconds histogram go_gc_pauses_seconds_bucket{le="6.399999999999999e-08"} 0 go_gc_pauses_seconds_bucket{le="6.399999999999999e-07"} 0 -go_gc_pauses_seconds_bucket{le="7.167999999999999e-06"} 3 -go_gc_pauses_seconds_bucket{le="8.191999999999999e-05"} 17 -go_gc_pauses_seconds_bucket{le="0.0009175039999999999"} 26 -go_gc_pauses_seconds_bucket{le="0.010485759999999998"} 26 -go_gc_pauses_seconds_bucket{le="0.11744051199999998"} 26 -go_gc_pauses_seconds_bucket{le="+Inf"} 26 -go_gc_pauses_seconds_sum 0.000839552 -go_gc_pauses_seconds_count 26 +go_gc_pauses_seconds_bucket{le="7.167999999999999e-06"} 2 +go_gc_pauses_seconds_bucket{le="8.191999999999999e-05"} 31 +go_gc_pauses_seconds_bucket{le="0.0009175039999999999"} 58 +go_gc_pauses_seconds_bucket{le="0.010485759999999998"} 58 +go_gc_pauses_seconds_bucket{le="0.11744051199999998"} 58 +go_gc_pauses_seconds_bucket{le="+Inf"} 58 +go_gc_pauses_seconds_sum 0.0024209920000000003 +go_gc_pauses_seconds_count 58 +# HELP go_gc_scan_globals_bytes The total amount of global variable space that is scannable. +# TYPE go_gc_scan_globals_bytes gauge +go_gc_scan_globals_bytes 502376 +# HELP go_gc_scan_heap_bytes The total amount of heap space that is scannable. +# TYPE go_gc_scan_heap_bytes gauge +go_gc_scan_heap_bytes 1.7478304e+07 +# HELP go_gc_scan_stack_bytes The number of bytes of stack that were scanned last GC cycle. +# TYPE go_gc_scan_stack_bytes gauge +go_gc_scan_stack_bytes 1.455808e+06 +# HELP go_gc_scan_total_bytes The total amount space that is scannable. Sum of all metrics in /gc/scan. 
+# TYPE go_gc_scan_total_bytes gauge +go_gc_scan_total_bytes 1.9436488e+07 # HELP go_gc_stack_starting_size_bytes The stack size of new goroutines. # TYPE go_gc_stack_starting_size_bytes gauge go_gc_stack_starting_size_bytes 4096 +# HELP go_godebug_non_default_behavior_execerrdot_events_total The number of non-default behaviors executed by the os/exec package due to a non-default GODEBUG=execerrdot=... setting. +# TYPE go_godebug_non_default_behavior_execerrdot_events_total counter +go_godebug_non_default_behavior_execerrdot_events_total 0 +# HELP go_godebug_non_default_behavior_gocachehash_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocachehash=... setting. +# TYPE go_godebug_non_default_behavior_gocachehash_events_total counter +go_godebug_non_default_behavior_gocachehash_events_total 0 +# HELP go_godebug_non_default_behavior_gocachetest_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocachetest=... setting. +# TYPE go_godebug_non_default_behavior_gocachetest_events_total counter +go_godebug_non_default_behavior_gocachetest_events_total 0 +# HELP go_godebug_non_default_behavior_gocacheverify_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocacheverify=... setting. +# TYPE go_godebug_non_default_behavior_gocacheverify_events_total counter +go_godebug_non_default_behavior_gocacheverify_events_total 0 +# HELP go_godebug_non_default_behavior_http2client_events_total The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2client=... setting. 
+# TYPE go_godebug_non_default_behavior_http2client_events_total counter +go_godebug_non_default_behavior_http2client_events_total 0 +# HELP go_godebug_non_default_behavior_http2server_events_total The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2server=... setting. +# TYPE go_godebug_non_default_behavior_http2server_events_total counter +go_godebug_non_default_behavior_http2server_events_total 0 +# HELP go_godebug_non_default_behavior_installgoroot_events_total The number of non-default behaviors executed by the go/build package due to a non-default GODEBUG=installgoroot=... setting. +# TYPE go_godebug_non_default_behavior_installgoroot_events_total counter +go_godebug_non_default_behavior_installgoroot_events_total 0 +# HELP go_godebug_non_default_behavior_jstmpllitinterp_events_total The number of non-default behaviors executed by the html/template package due to a non-default GODEBUG=jstmpllitinterp=... setting. +# TYPE go_godebug_non_default_behavior_jstmpllitinterp_events_total counter +go_godebug_non_default_behavior_jstmpllitinterp_events_total 0 +# HELP go_godebug_non_default_behavior_multipartmaxheaders_events_total The number of non-default behaviors executed by the mime/multipart package due to a non-default GODEBUG=multipartmaxheaders=... setting. +# TYPE go_godebug_non_default_behavior_multipartmaxheaders_events_total counter +go_godebug_non_default_behavior_multipartmaxheaders_events_total 0 +# HELP go_godebug_non_default_behavior_multipartmaxparts_events_total The number of non-default behaviors executed by the mime/multipart package due to a non-default GODEBUG=multipartmaxparts=... setting. 
+# TYPE go_godebug_non_default_behavior_multipartmaxparts_events_total counter +go_godebug_non_default_behavior_multipartmaxparts_events_total 0 +# HELP go_godebug_non_default_behavior_multipathtcp_events_total The number of non-default behaviors executed by the net package due to a non-default GODEBUG=multipathtcp=... setting. +# TYPE go_godebug_non_default_behavior_multipathtcp_events_total counter +go_godebug_non_default_behavior_multipathtcp_events_total 0 +# HELP go_godebug_non_default_behavior_panicnil_events_total The number of non-default behaviors executed by the runtime package due to a non-default GODEBUG=panicnil=... setting. +# TYPE go_godebug_non_default_behavior_panicnil_events_total counter +go_godebug_non_default_behavior_panicnil_events_total 0 +# HELP go_godebug_non_default_behavior_randautoseed_events_total The number of non-default behaviors executed by the math/rand package due to a non-default GODEBUG=randautoseed=... setting. +# TYPE go_godebug_non_default_behavior_randautoseed_events_total counter +go_godebug_non_default_behavior_randautoseed_events_total 0 +# HELP go_godebug_non_default_behavior_tarinsecurepath_events_total The number of non-default behaviors executed by the archive/tar package due to a non-default GODEBUG=tarinsecurepath=... setting. +# TYPE go_godebug_non_default_behavior_tarinsecurepath_events_total counter +go_godebug_non_default_behavior_tarinsecurepath_events_total 0 +# HELP go_godebug_non_default_behavior_tlsmaxrsasize_events_total The number of non-default behaviors executed by the crypto/tls package due to a non-default GODEBUG=tlsmaxrsasize=... setting. +# TYPE go_godebug_non_default_behavior_tlsmaxrsasize_events_total counter +go_godebug_non_default_behavior_tlsmaxrsasize_events_total 0 +# HELP go_godebug_non_default_behavior_x509sha1_events_total The number of non-default behaviors executed by the crypto/x509 package due to a non-default GODEBUG=x509sha1=... setting. 
+# TYPE go_godebug_non_default_behavior_x509sha1_events_total counter +go_godebug_non_default_behavior_x509sha1_events_total 0 +# HELP go_godebug_non_default_behavior_x509usefallbackroots_events_total The number of non-default behaviors executed by the crypto/x509 package due to a non-default GODEBUG=x509usefallbackroots=... setting. +# TYPE go_godebug_non_default_behavior_x509usefallbackroots_events_total counter +go_godebug_non_default_behavior_x509usefallbackroots_events_total 0 +# HELP go_godebug_non_default_behavior_zipinsecurepath_events_total The number of non-default behaviors executed by the archive/zip package due to a non-default GODEBUG=zipinsecurepath=... setting. +# TYPE go_godebug_non_default_behavior_zipinsecurepath_events_total counter +go_godebug_non_default_behavior_zipinsecurepath_events_total 0 # HELP go_goroutines Number of goroutines that currently exist. # TYPE go_goroutines gauge -go_goroutines 1123 +go_goroutines 1126 # HELP go_info Information about the Go environment. # TYPE go_info gauge -go_info{version="go1.20.7"} 1 +go_info{version="go1.21.5"} 1 # HELP go_memory_classes_heap_free_bytes Memory that is completely free and eligible to be returned to the underlying system, but has not been. This metric is the runtime's estimate of free address space that is backed by physical memory. # TYPE go_memory_classes_heap_free_bytes gauge -go_memory_classes_heap_free_bytes 4.227072e+06 +go_memory_classes_heap_free_bytes 1.384448e+06 # HELP go_memory_classes_heap_objects_bytes Memory occupied by live objects and dead objects that have not yet been marked free by the garbage collector. # TYPE go_memory_classes_heap_objects_bytes gauge -go_memory_classes_heap_objects_bytes 2.2839032e+07 +go_memory_classes_heap_objects_bytes 2.157112e+07 # HELP go_memory_classes_heap_released_bytes Memory that is completely free and has been returned to the underlying system. 
This metric is the runtime's estimate of free address space that is still mapped into the process, but is not backed by physical memory. # TYPE go_memory_classes_heap_released_bytes gauge -go_memory_classes_heap_released_bytes 851968 -# HELP go_memory_classes_heap_stacks_bytes Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use. +go_memory_classes_heap_released_bytes 8.749056e+06 +# HELP go_memory_classes_heap_stacks_bytes Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use. Currently, this represents all stack memory for goroutines. It also includes all OS thread stacks in non-cgo programs. Note that stacks may be allocated differently in the future, and this may change. # TYPE go_memory_classes_heap_stacks_bytes gauge -go_memory_classes_heap_stacks_bytes 6.029312e+06 +go_memory_classes_heap_stacks_bytes 6.946816e+06 # HELP go_memory_classes_heap_unused_bytes Memory that is reserved for heap objects but is not currently used to hold heap objects. # TYPE go_memory_classes_heap_unused_bytes gauge -go_memory_classes_heap_unused_bytes 7.995656e+06 +go_memory_classes_heap_unused_bytes 7.485904e+06 # HELP go_memory_classes_metadata_mcache_free_bytes Memory that is reserved for runtime mcache structures, but not in-use. # TYPE go_memory_classes_metadata_mcache_free_bytes gauge -go_memory_classes_metadata_mcache_free_bytes 6000 +go_memory_classes_metadata_mcache_free_bytes 12000 # HELP go_memory_classes_metadata_mcache_inuse_bytes Memory that is occupied by runtime mcache structures that are currently being used. # TYPE go_memory_classes_metadata_mcache_inuse_bytes gauge -go_memory_classes_metadata_mcache_inuse_bytes 9600 +go_memory_classes_metadata_mcache_inuse_bytes 19200 # HELP go_memory_classes_metadata_mspan_free_bytes Memory that is reserved for runtime mspan structures, but not in-use. 
# TYPE go_memory_classes_metadata_mspan_free_bytes gauge -go_memory_classes_metadata_mspan_free_bytes 56000 +go_memory_classes_metadata_mspan_free_bytes 93408 # HELP go_memory_classes_metadata_mspan_inuse_bytes Memory that is occupied by runtime mspan structures that are currently being used. # TYPE go_memory_classes_metadata_mspan_inuse_bytes gauge -go_memory_classes_metadata_mspan_inuse_bytes 498880 +go_memory_classes_metadata_mspan_inuse_bytes 558432 # HELP go_memory_classes_metadata_other_bytes Memory that is reserved for or used to hold runtime metadata. # TYPE go_memory_classes_metadata_other_bytes gauge -go_memory_classes_metadata_other_bytes 9.200032e+06 -# HELP go_memory_classes_os_stacks_bytes Stack memory allocated by the underlying operating system. +go_memory_classes_metadata_other_bytes 4.99332e+06 +# HELP go_memory_classes_os_stacks_bytes Stack memory allocated by the underlying operating system. In non-cgo programs this metric is currently zero. This may change in the future.In cgo programs this metric includes OS thread stacks allocated directly from the OS. Currently, this only accounts for one stack in c-shared and c-archive build modes, and other sources of stacks from the OS are not measured. This too may change in the future. # TYPE go_memory_classes_os_stacks_bytes gauge go_memory_classes_os_stacks_bytes 0 # HELP go_memory_classes_other_bytes Memory used by execution trace buffers, structures for debugging the runtime, finalizer and profiler specials, and more. # TYPE go_memory_classes_other_bytes gauge -go_memory_classes_other_bytes 1.71237e+06 +go_memory_classes_other_bytes 3.114019e+06 # HELP go_memory_classes_profiling_buckets_bytes Memory that is used by the stack trace hash map used for profiling. 
# TYPE go_memory_classes_profiling_buckets_bytes gauge -go_memory_classes_profiling_buckets_bytes 1.490438e+06 +go_memory_classes_profiling_buckets_bytes 1.536925e+06 # HELP go_memory_classes_total_bytes All memory mapped by the Go runtime into the current process as read-write. Note that this does not include memory mapped by code called via cgo or via the syscall package. Sum of all metrics in /memory/classes. # TYPE go_memory_classes_total_bytes gauge -go_memory_classes_total_bytes 5.491636e+07 +go_memory_classes_total_bytes 5.6464648e+07 # HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. # TYPE go_memstats_alloc_bytes gauge -go_memstats_alloc_bytes 2.2839032e+07 +go_memstats_alloc_bytes 2.157112e+07 # HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. # TYPE go_memstats_alloc_bytes_total counter -go_memstats_alloc_bytes_total 7.3857112e+07 +go_memstats_alloc_bytes_total 1.95122176e+08 # HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. # TYPE go_memstats_buck_hash_sys_bytes gauge -go_memstats_buck_hash_sys_bytes 1.490438e+06 +go_memstats_buck_hash_sys_bytes 1.536925e+06 # HELP go_memstats_frees_total Total number of frees. # TYPE go_memstats_frees_total counter -go_memstats_frees_total 416389 +go_memstats_frees_total 1.814704e+06 # HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. # TYPE go_memstats_gc_sys_bytes gauge -go_memstats_gc_sys_bytes 9.200032e+06 +go_memstats_gc_sys_bytes 4.99332e+06 # HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. # TYPE go_memstats_heap_alloc_bytes gauge -go_memstats_heap_alloc_bytes 2.2839032e+07 +go_memstats_heap_alloc_bytes 2.157112e+07 # HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. 
# TYPE go_memstats_heap_idle_bytes gauge -go_memstats_heap_idle_bytes 5.07904e+06 +go_memstats_heap_idle_bytes 1.0133504e+07 # HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. # TYPE go_memstats_heap_inuse_bytes gauge -go_memstats_heap_inuse_bytes 3.0834688e+07 +go_memstats_heap_inuse_bytes 2.9057024e+07 # HELP go_memstats_heap_objects Number of allocated objects. # TYPE go_memstats_heap_objects gauge -go_memstats_heap_objects 151847 +go_memstats_heap_objects 138954 # HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. # TYPE go_memstats_heap_released_bytes gauge -go_memstats_heap_released_bytes 851968 +go_memstats_heap_released_bytes 8.749056e+06 # HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. # TYPE go_memstats_heap_sys_bytes gauge -go_memstats_heap_sys_bytes 3.5913728e+07 +go_memstats_heap_sys_bytes 3.9190528e+07 # HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. # TYPE go_memstats_last_gc_time_seconds gauge -go_memstats_last_gc_time_seconds 1.698752425520854e+09 +go_memstats_last_gc_time_seconds 1.7048969121171913e+09 # HELP go_memstats_lookups_total Total number of pointer lookups. # TYPE go_memstats_lookups_total counter go_memstats_lookups_total 0 # HELP go_memstats_mallocs_total Total number of mallocs. # TYPE go_memstats_mallocs_total counter -go_memstats_mallocs_total 568236 +go_memstats_mallocs_total 1.953658e+06 # HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. # TYPE go_memstats_mcache_inuse_bytes gauge -go_memstats_mcache_inuse_bytes 9600 +go_memstats_mcache_inuse_bytes 19200 # HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. # TYPE go_memstats_mcache_sys_bytes gauge -go_memstats_mcache_sys_bytes 15600 +go_memstats_mcache_sys_bytes 31200 # HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. 
# TYPE go_memstats_mspan_inuse_bytes gauge -go_memstats_mspan_inuse_bytes 498880 +go_memstats_mspan_inuse_bytes 558432 # HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. # TYPE go_memstats_mspan_sys_bytes gauge -go_memstats_mspan_sys_bytes 554880 +go_memstats_mspan_sys_bytes 651840 # HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. # TYPE go_memstats_next_gc_bytes gauge -go_memstats_next_gc_bytes 3.5172488e+07 +go_memstats_next_gc_bytes 3.468156e+07 # HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. # TYPE go_memstats_other_sys_bytes gauge -go_memstats_other_sys_bytes 1.71237e+06 +go_memstats_other_sys_bytes 3.114019e+06 # HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. # TYPE go_memstats_stack_inuse_bytes gauge -go_memstats_stack_inuse_bytes 6.029312e+06 +go_memstats_stack_inuse_bytes 6.946816e+06 # HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. # TYPE go_memstats_stack_sys_bytes gauge -go_memstats_stack_sys_bytes 6.029312e+06 +go_memstats_stack_sys_bytes 6.946816e+06 # HELP go_memstats_sys_bytes Number of bytes obtained from system. # TYPE go_memstats_sys_bytes gauge -go_memstats_sys_bytes 5.491636e+07 +go_memstats_sys_bytes 5.6464648e+07 # HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. # TYPE go_sched_gomaxprocs_threads gauge -go_sched_gomaxprocs_threads 8 +go_sched_gomaxprocs_threads 16 # HELP go_sched_goroutines_goroutines Count of live goroutines. # TYPE go_sched_goroutines_goroutines gauge -go_sched_goroutines_goroutines 1122 -# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. 
+go_sched_goroutines_goroutines 1126 +# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. Bucket counts increase monotonically. # TYPE go_sched_latencies_seconds histogram -go_sched_latencies_seconds_bucket{le="6.399999999999999e-08"} 1225 -go_sched_latencies_seconds_bucket{le="6.399999999999999e-07"} 1505 -go_sched_latencies_seconds_bucket{le="7.167999999999999e-06"} 2657 -go_sched_latencies_seconds_bucket{le="8.191999999999999e-05"} 4392 -go_sched_latencies_seconds_bucket{le="0.0009175039999999999"} 4579 -go_sched_latencies_seconds_bucket{le="0.010485759999999998"} 4588 -go_sched_latencies_seconds_bucket{le="0.11744051199999998"} 4588 -go_sched_latencies_seconds_bucket{le="+Inf"} 4588 -go_sched_latencies_seconds_sum 0.036768256 -go_sched_latencies_seconds_count 4588 +go_sched_latencies_seconds_bucket{le="6.399999999999999e-08"} 4662 +go_sched_latencies_seconds_bucket{le="6.399999999999999e-07"} 5508 +go_sched_latencies_seconds_bucket{le="7.167999999999999e-06"} 11724 +go_sched_latencies_seconds_bucket{le="8.191999999999999e-05"} 51675 +go_sched_latencies_seconds_bucket{le="0.0009175039999999999"} 52881 +go_sched_latencies_seconds_bucket{le="0.010485759999999998"} 52896 +go_sched_latencies_seconds_bucket{le="0.11744051199999998"} 52896 +go_sched_latencies_seconds_bucket{le="+Inf"} 52896 +go_sched_latencies_seconds_sum 0.40295923199999995 +go_sched_latencies_seconds_count 52896 # HELP go_sync_mutex_wait_total_seconds_total Approximate cumulative time goroutines have spent blocked on a sync.Mutex or sync.RWMutex. This metric is useful for identifying global changes in lock contention. Collect a mutex or block profile using the runtime/pprof package for more detailed contention data. 
# TYPE go_sync_mutex_wait_total_seconds_total counter -go_sync_mutex_wait_total_seconds_total 13.275364024 +go_sync_mutex_wait_total_seconds_total 0.076525904 # HELP go_threads Number of OS threads created. # TYPE go_threads gauge -go_threads 10 +go_threads 18 # HELP hidden_metrics_total [BETA] The count of hidden metrics. # TYPE hidden_metrics_total counter hidden_metrics_total 0 # HELP kubernetes_build_info [ALPHA] A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running. # TYPE kubernetes_build_info gauge -kubernetes_build_info{build_date="2023-08-15T21:24:51Z",compiler="gc",git_commit="855e7c48de7388eb330da0f8d9d2394ee818fb8d",git_tree_state="clean",git_version="v1.28.0",go_version="go1.20.7",major="1",minor="28",platform="linux/amd64"} 1 +kubernetes_build_info{build_date="2023-12-14T19:18:17Z",compiler="gc",git_commit="3f7a50f38688eb332e2a1b013678c6435d539ae6",git_tree_state="clean",git_version="v1.29.0",go_version="go1.21.5",major="1",minor="29",platform="linux/amd64"} 1 # HELP kubernetes_feature_enabled [BETA] This metric records the data about the stage and enablement of a k8s feature. 
# TYPE kubernetes_feature_enabled gauge -kubernetes_feature_enabled{name="APIListChunking",stage="BETA"} 1 -kubernetes_feature_enabled{name="APIPriorityAndFairness",stage="BETA"} 1 +kubernetes_feature_enabled{name="APIListChunking",stage=""} 1 +kubernetes_feature_enabled{name="APIPriorityAndFairness",stage=""} 1 kubernetes_feature_enabled{name="APIResponseCompression",stage="BETA"} 1 kubernetes_feature_enabled{name="APISelfSubjectReview",stage=""} 1 kubernetes_feature_enabled{name="APIServerIdentity",stage="BETA"} 1 @@ -602,6 +695,7 @@ kubernetes_feature_enabled{name="AdmissionWebhookMatchConditions",stage="BETA"} kubernetes_feature_enabled{name="AggregatedDiscoveryEndpoint",stage="BETA"} 1 kubernetes_feature_enabled{name="AllAlpha",stage="ALPHA"} 0 kubernetes_feature_enabled{name="AllBeta",stage="BETA"} 0 +kubernetes_feature_enabled{name="AllowServiceLBStatusOnNonLB",stage="DEPRECATED"} 0 kubernetes_feature_enabled{name="AnyVolumeDataSource",stage="BETA"} 1 kubernetes_feature_enabled{name="AppArmor",stage="BETA"} 1 kubernetes_feature_enabled{name="CPUManager",stage=""} 1 @@ -612,27 +706,26 @@ kubernetes_feature_enabled{name="CRDValidationRatcheting",stage="ALPHA"} 0 kubernetes_feature_enabled{name="CSIMigrationAzureFile",stage=""} 1 kubernetes_feature_enabled{name="CSIMigrationPortworx",stage="BETA"} 0 kubernetes_feature_enabled{name="CSIMigrationRBD",stage="DEPRECATED"} 0 -kubernetes_feature_enabled{name="CSIMigrationvSphere",stage=""} 1 -kubernetes_feature_enabled{name="CSINodeExpandSecret",stage="BETA"} 1 +kubernetes_feature_enabled{name="CSINodeExpandSecret",stage=""} 1 kubernetes_feature_enabled{name="CSIVolumeHealth",stage="ALPHA"} 0 kubernetes_feature_enabled{name="CloudControllerManagerWebhook",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="CloudDualStackNodeIPs",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CloudDualStackNodeIPs",stage="BETA"} 1 kubernetes_feature_enabled{name="ClusterTrustBundle",stage="ALPHA"} 0 
+kubernetes_feature_enabled{name="ClusterTrustBundleProjection",stage="ALPHA"} 0 kubernetes_feature_enabled{name="ComponentSLIs",stage="BETA"} 1 kubernetes_feature_enabled{name="ConsistentHTTPGetHandlers",stage=""} 1 kubernetes_feature_enabled{name="ConsistentListFromCache",stage="ALPHA"} 0 kubernetes_feature_enabled{name="ContainerCheckpoint",stage="ALPHA"} 0 kubernetes_feature_enabled{name="ContextualLogging",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="CronJobTimeZone",stage=""} 1 kubernetes_feature_enabled{name="CronJobsScheduledAnnotation",stage="BETA"} 1 kubernetes_feature_enabled{name="CrossNamespaceVolumeDataSource",stage="ALPHA"} 0 kubernetes_feature_enabled{name="CustomCPUCFSQuotaPeriod",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="CustomResourceValidationExpressions",stage="BETA"} 1 +kubernetes_feature_enabled{name="CustomResourceValidationExpressions",stage=""} 1 kubernetes_feature_enabled{name="DefaultHostNetworkHostPortsInPodTemplates",stage="DEPRECATED"} 0 -kubernetes_feature_enabled{name="DevicePluginCDIDevices",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="DisableCloudProviders",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="DisableKubeletCloudCredentialProviders",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="DownwardAPIHugePages",stage=""} 1 +kubernetes_feature_enabled{name="DevicePluginCDIDevices",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableCloudProviders",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableKubeletCloudCredentialProviders",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableNodeKubeProxyVersion",stage="ALPHA"} 0 kubernetes_feature_enabled{name="DynamicResourceAllocation",stage="ALPHA"} 0 kubernetes_feature_enabled{name="EfficientWatchResumption",stage=""} 1 kubernetes_feature_enabled{name="ElasticIndexedJob",stage="BETA"} 1 @@ -640,13 +733,13 @@ kubernetes_feature_enabled{name="EventedPLEG",stage="BETA"} 0 kubernetes_feature_enabled{name="ExecProbeTimeout",stage=""} 1 
kubernetes_feature_enabled{name="ExpandedDNSConfig",stage=""} 1 kubernetes_feature_enabled{name="ExperimentalHostUserNamespaceDefaulting",stage="DEPRECATED"} 0 -kubernetes_feature_enabled{name="GRPCContainerProbe",stage=""} 1 kubernetes_feature_enabled{name="GracefulNodeShutdown",stage="BETA"} 1 kubernetes_feature_enabled{name="GracefulNodeShutdownBasedOnPodPriority",stage="BETA"} 1 kubernetes_feature_enabled{name="HPAContainerMetrics",stage="BETA"} 1 kubernetes_feature_enabled{name="HPAScaleToZero",stage="ALPHA"} 0 kubernetes_feature_enabled{name="HonorPVReclaimPolicy",stage="ALPHA"} 0 kubernetes_feature_enabled{name="IPTablesOwnershipCleanup",stage=""} 1 +kubernetes_feature_enabled{name="ImageMaximumGCAge",stage="ALPHA"} 0 kubernetes_feature_enabled{name="InPlacePodVerticalScaling",stage="ALPHA"} 0 kubernetes_feature_enabled{name="InTreePluginAWSUnregister",stage="ALPHA"} 0 kubernetes_feature_enabled{name="InTreePluginAzureDiskUnregister",stage="ALPHA"} 0 @@ -656,15 +749,13 @@ kubernetes_feature_enabled{name="InTreePluginOpenStackUnregister",stage="ALPHA"} kubernetes_feature_enabled{name="InTreePluginPortworxUnregister",stage="ALPHA"} 0 kubernetes_feature_enabled{name="InTreePluginRBDUnregister",stage="DEPRECATED"} 0 kubernetes_feature_enabled{name="InTreePluginvSphereUnregister",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="JobBackoffLimitPerIndex",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="JobMutableNodeSchedulingDirectives",stage=""} 1 +kubernetes_feature_enabled{name="JobBackoffLimitPerIndex",stage="BETA"} 1 kubernetes_feature_enabled{name="JobPodFailurePolicy",stage="BETA"} 1 -kubernetes_feature_enabled{name="JobPodReplacementPolicy",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="JobReadyPods",stage="BETA"} 1 -kubernetes_feature_enabled{name="JobTrackingWithFinalizers",stage=""} 1 -kubernetes_feature_enabled{name="KMSv1",stage="DEPRECATED"} 1 -kubernetes_feature_enabled{name="KMSv2",stage="BETA"} 1 
-kubernetes_feature_enabled{name="KMSv2KDF",stage="BETA"} 0 +kubernetes_feature_enabled{name="JobPodReplacementPolicy",stage="BETA"} 1 +kubernetes_feature_enabled{name="JobReadyPods",stage=""} 1 +kubernetes_feature_enabled{name="KMSv1",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="KMSv2",stage=""} 1 +kubernetes_feature_enabled{name="KMSv2KDF",stage=""} 1 kubernetes_feature_enabled{name="KubeProxyDrainingTerminatingNodes",stage="ALPHA"} 0 kubernetes_feature_enabled{name="KubeletCgroupDriverFromCRI",stage="ALPHA"} 0 kubernetes_feature_enabled{name="KubeletInUserNamespace",stage="ALPHA"} 0 @@ -672,77 +763,88 @@ kubernetes_feature_enabled{name="KubeletPodResources",stage=""} 1 kubernetes_feature_enabled{name="KubeletPodResourcesDynamicResources",stage="ALPHA"} 0 kubernetes_feature_enabled{name="KubeletPodResourcesGet",stage="ALPHA"} 0 kubernetes_feature_enabled{name="KubeletPodResourcesGetAllocatable",stage=""} 1 +kubernetes_feature_enabled{name="KubeletSeparateDiskGC",stage="ALPHA"} 0 kubernetes_feature_enabled{name="KubeletTracing",stage="BETA"} 1 -kubernetes_feature_enabled{name="LegacyServiceAccountTokenCleanUp",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="LegacyServiceAccountTokenNoAutoGeneration",stage=""} 1 +kubernetes_feature_enabled{name="LegacyServiceAccountTokenCleanUp",stage="BETA"} 1 kubernetes_feature_enabled{name="LegacyServiceAccountTokenTracking",stage=""} 1 +kubernetes_feature_enabled{name="LoadBalancerIPMode",stage="ALPHA"} 0 kubernetes_feature_enabled{name="LocalStorageCapacityIsolationFSQuotaMonitoring",stage="ALPHA"} 0 kubernetes_feature_enabled{name="LogarithmicScaleDown",stage="BETA"} 1 kubernetes_feature_enabled{name="LoggingAlphaOptions",stage="ALPHA"} 0 kubernetes_feature_enabled{name="LoggingBetaOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="MatchLabelKeysInPodAffinity",stage="ALPHA"} 0 kubernetes_feature_enabled{name="MatchLabelKeysInPodTopologySpread",stage="BETA"} 1 
kubernetes_feature_enabled{name="MaxUnavailableStatefulSet",stage="ALPHA"} 0 kubernetes_feature_enabled{name="MemoryManager",stage="BETA"} 1 kubernetes_feature_enabled{name="MemoryQoS",stage="ALPHA"} 0 kubernetes_feature_enabled{name="MinDomainsInPodTopologySpread",stage="BETA"} 1 kubernetes_feature_enabled{name="MinimizeIPTablesRestore",stage=""} 1 -kubernetes_feature_enabled{name="MultiCIDRRangeAllocator",stage="ALPHA"} 0 kubernetes_feature_enabled{name="MultiCIDRServiceAllocator",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="NFTablesProxyMode",stage="ALPHA"} 0 kubernetes_feature_enabled{name="NewVolumeManagerReconstruction",stage="BETA"} 1 kubernetes_feature_enabled{name="NodeInclusionPolicyInPodTopologySpread",stage="BETA"} 1 kubernetes_feature_enabled{name="NodeLogQuery",stage="ALPHA"} 0 kubernetes_feature_enabled{name="NodeOutOfServiceVolumeDetach",stage=""} 1 kubernetes_feature_enabled{name="NodeSwap",stage="BETA"} 0 kubernetes_feature_enabled{name="OpenAPIEnums",stage="BETA"} 1 -kubernetes_feature_enabled{name="OpenAPIV3",stage=""} 1 kubernetes_feature_enabled{name="PDBUnhealthyPodEvictionPolicy",stage="BETA"} 1 -kubernetes_feature_enabled{name="PersistentVolumeLastPhaseTransitionTime",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PersistentVolumeLastPhaseTransitionTime",stage="BETA"} 1 kubernetes_feature_enabled{name="PodAndContainerStatsFromCRI",stage="ALPHA"} 0 kubernetes_feature_enabled{name="PodDeletionCost",stage="BETA"} 1 kubernetes_feature_enabled{name="PodDisruptionConditions",stage="BETA"} 1 -kubernetes_feature_enabled{name="PodHostIPs",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PodHostIPs",stage="BETA"} 1 kubernetes_feature_enabled{name="PodIndexLabel",stage="BETA"} 1 -kubernetes_feature_enabled{name="PodReadyToStartContainersCondition",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PodLifecycleSleepAction",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PodReadyToStartContainersCondition",stage="BETA"} 1 
kubernetes_feature_enabled{name="PodSchedulingReadiness",stage="BETA"} 1 -kubernetes_feature_enabled{name="ProbeTerminationGracePeriod",stage=""} 1 kubernetes_feature_enabled{name="ProcMountType",stage="ALPHA"} 0 kubernetes_feature_enabled{name="ProxyTerminatingEndpoints",stage=""} 1 kubernetes_feature_enabled{name="QOSReserved",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="ReadWriteOncePod",stage="BETA"} 1 +kubernetes_feature_enabled{name="ReadWriteOncePod",stage=""} 1 kubernetes_feature_enabled{name="RecoverVolumeExpansionFailure",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="RemainingItemCount",stage="BETA"} 1 +kubernetes_feature_enabled{name="RemainingItemCount",stage=""} 1 kubernetes_feature_enabled{name="RemoveSelfLink",stage=""} 1 -kubernetes_feature_enabled{name="RetroactiveDefaultStorageClass",stage=""} 1 kubernetes_feature_enabled{name="RotateKubeletServerCertificate",stage="BETA"} 1 +kubernetes_feature_enabled{name="RuntimeClassInImageCriApi",stage="ALPHA"} 0 kubernetes_feature_enabled{name="SELinuxMountReadWriteOncePod",stage="BETA"} 1 -kubernetes_feature_enabled{name="SchedulerQueueingHints",stage="BETA"} 1 -kubernetes_feature_enabled{name="SeccompDefault",stage=""} 1 +kubernetes_feature_enabled{name="SchedulerQueueingHints",stage="BETA"} 0 kubernetes_feature_enabled{name="SecurityContextDeny",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="SeparateTaintEvictionController",stage="BETA"} 1 kubernetes_feature_enabled{name="ServerSideApply",stage=""} 1 kubernetes_feature_enabled{name="ServerSideFieldValidation",stage=""} 1 -kubernetes_feature_enabled{name="ServiceNodePortStaticSubrange",stage="BETA"} 1 -kubernetes_feature_enabled{name="SidecarContainers",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenJTI",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenNodeBinding",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenNodeBindingValidation",stage="ALPHA"} 0 
+kubernetes_feature_enabled{name="ServiceAccountTokenPodNodeInfo",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceNodePortStaticSubrange",stage=""} 1 +kubernetes_feature_enabled{name="SidecarContainers",stage="BETA"} 1 kubernetes_feature_enabled{name="SizeMemoryBackedVolumes",stage="BETA"} 1 -kubernetes_feature_enabled{name="SkipReadOnlyValidationGCE",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="SkipReadOnlyValidationGCE",stage="DEPRECATED"} 1 kubernetes_feature_enabled{name="StableLoadBalancerNodeSet",stage="BETA"} 1 kubernetes_feature_enabled{name="StatefulSetAutoDeletePVC",stage="BETA"} 1 kubernetes_feature_enabled{name="StatefulSetStartOrdinal",stage="BETA"} 1 kubernetes_feature_enabled{name="StorageVersionAPI",stage="ALPHA"} 0 kubernetes_feature_enabled{name="StorageVersionHash",stage="BETA"} 1 +kubernetes_feature_enabled{name="StructuredAuthenticationConfiguration",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="StructuredAuthorizationConfiguration",stage="ALPHA"} 0 kubernetes_feature_enabled{name="TopologyAwareHints",stage="BETA"} 1 -kubernetes_feature_enabled{name="TopologyManager",stage=""} 1 kubernetes_feature_enabled{name="TopologyManagerPolicyAlphaOptions",stage="ALPHA"} 0 kubernetes_feature_enabled{name="TopologyManagerPolicyBetaOptions",stage="BETA"} 1 kubernetes_feature_enabled{name="TopologyManagerPolicyOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="TranslateStreamCloseWebsocketRequests",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="UnauthenticatedHTTP2DOSMitigation",stage="BETA"} 1 kubernetes_feature_enabled{name="UnknownVersionInteroperabilityProxy",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="UserNamespacesPodSecurityStandards",stage="ALPHA"} 0 kubernetes_feature_enabled{name="UserNamespacesSupport",stage="ALPHA"} 0 kubernetes_feature_enabled{name="ValidatingAdmissionPolicy",stage="BETA"} 0 +kubernetes_feature_enabled{name="VolumeAttributesClass",stage="ALPHA"} 0 
kubernetes_feature_enabled{name="VolumeCapacityPriority",stage="ALPHA"} 0 kubernetes_feature_enabled{name="WatchBookmark",stage=""} 1 kubernetes_feature_enabled{name="WatchList",stage="ALPHA"} 0 kubernetes_feature_enabled{name="WinDSR",stage="ALPHA"} 0 kubernetes_feature_enabled{name="WinOverlay",stage="BETA"} 1 kubernetes_feature_enabled{name="WindowsHostNetwork",stage="ALPHA"} 1 +kubernetes_feature_enabled{name="ZeroLimitedNominalConcurrencyShares",stage="BETA"} 0 # HELP leader_election_master_status [ALPHA] Gauge of if the reporting system is master of the relevant lease, 0 indicates backup, 1 indicates master. 'name' is the string used to identify the lease. Please make sure to group by name. # TYPE leader_election_master_status gauge leader_election_master_status{name="kube-controller-manager"} 1 @@ -754,30 +856,30 @@ node_collector_evictions_total{zone=""} 0 node_collector_unhealthy_nodes_in_zone{zone=""} 0 # HELP node_collector_update_all_nodes_health_duration_seconds [ALPHA] Duration in seconds for NodeController to update the health of all nodes. 
# TYPE node_collector_update_all_nodes_health_duration_seconds histogram -node_collector_update_all_nodes_health_duration_seconds_bucket{le="0.01"} 16 -node_collector_update_all_nodes_health_duration_seconds_bucket{le="0.04"} 16 -node_collector_update_all_nodes_health_duration_seconds_bucket{le="0.16"} 16 -node_collector_update_all_nodes_health_duration_seconds_bucket{le="0.64"} 16 -node_collector_update_all_nodes_health_duration_seconds_bucket{le="2.56"} 16 -node_collector_update_all_nodes_health_duration_seconds_bucket{le="10.24"} 16 -node_collector_update_all_nodes_health_duration_seconds_bucket{le="40.96"} 16 -node_collector_update_all_nodes_health_duration_seconds_bucket{le="163.84"} 16 -node_collector_update_all_nodes_health_duration_seconds_bucket{le="+Inf"} 16 -node_collector_update_all_nodes_health_duration_seconds_sum 0.004820456000000001 -node_collector_update_all_nodes_health_duration_seconds_count 16 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="0.01"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="0.04"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="0.16"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="0.64"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="2.56"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="10.24"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="40.96"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="163.84"} 441 +node_collector_update_all_nodes_health_duration_seconds_bucket{le="+Inf"} 441 +node_collector_update_all_nodes_health_duration_seconds_sum 0.08619570099999996 +node_collector_update_all_nodes_health_duration_seconds_count 441 # HELP node_collector_update_node_health_duration_seconds [ALPHA] Duration in seconds for NodeController to update the health of a single node. 
# TYPE node_collector_update_node_health_duration_seconds histogram -node_collector_update_node_health_duration_seconds_bucket{le="0.001"} 16 -node_collector_update_node_health_duration_seconds_bucket{le="0.004"} 16 -node_collector_update_node_health_duration_seconds_bucket{le="0.016"} 16 -node_collector_update_node_health_duration_seconds_bucket{le="0.064"} 16 -node_collector_update_node_health_duration_seconds_bucket{le="0.256"} 16 -node_collector_update_node_health_duration_seconds_bucket{le="1.024"} 16 -node_collector_update_node_health_duration_seconds_bucket{le="4.096"} 16 -node_collector_update_node_health_duration_seconds_bucket{le="16.384"} 16 -node_collector_update_node_health_duration_seconds_bucket{le="+Inf"} 16 -node_collector_update_node_health_duration_seconds_sum 0.0030302140000000003 -node_collector_update_node_health_duration_seconds_count 16 +node_collector_update_node_health_duration_seconds_bucket{le="0.001"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="0.004"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="0.016"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="0.064"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="0.256"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="1.024"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="4.096"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="16.384"} 441 +node_collector_update_node_health_duration_seconds_bucket{le="+Inf"} 441 +node_collector_update_node_health_duration_seconds_sum 0.05145579200000004 +node_collector_update_node_health_duration_seconds_count 441 # HELP node_collector_zone_health [ALPHA] Gauge measuring percentage of healthy nodes per zone. 
# TYPE node_collector_zone_health gauge node_collector_zone_health{zone=""} 100 @@ -805,7 +907,7 @@ node_ipam_controller_cidrset_usage_cidrs{clusterCIDR="10.244.0.0/16"} 0.00390625 node_ipam_controller_cirdset_max_cidrs{clusterCIDR="10.244.0.0/16"} 256 # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. # TYPE process_cpu_seconds_total counter -process_cpu_seconds_total 4.75 +process_cpu_seconds_total 58.19 # HELP process_max_fds Maximum number of open file descriptors. # TYPE process_max_fds gauge process_max_fds 1.048576e+06 @@ -814,22 +916,22 @@ process_max_fds 1.048576e+06 process_open_fds 18 # HELP process_resident_memory_bytes Resident memory size in bytes. # TYPE process_resident_memory_bytes gauge -process_resident_memory_bytes 9.9876864e+07 +process_resident_memory_bytes 1.092608e+08 # HELP process_start_time_seconds Start time of the process since unix epoch in seconds. # TYPE process_start_time_seconds gauge -process_start_time_seconds 1.69875238451e+09 +process_start_time_seconds 1.70489476711e+09 # HELP process_virtual_memory_bytes Virtual memory size in bytes. # TYPE process_virtual_memory_bytes gauge -process_virtual_memory_bytes 8.07755776e+08 +process_virtual_memory_bytes 1.337397248e+09 # HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. # TYPE process_virtual_memory_max_bytes gauge process_virtual_memory_max_bytes 1.8446744073709552e+19 # HELP registered_metrics_total [BETA] The count of registered metrics broken by stability level and deprecation version. # TYPE registered_metrics_total counter -registered_metrics_total{deprecated_version="",stability_level="ALPHA"} 128 -registered_metrics_total{deprecated_version="",stability_level="BETA"} 6 -registered_metrics_total{deprecated_version="",stability_level="STABLE"} 9 -# HELP replicaset_controller_sorting_deletion_age_ratio [ALPHA] The ratio of chosen deleted pod's ages to the current youngest pod's age (at the time). 
Should be <2.The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate's effect onthe sorting (and deletion) of pods when a replicaset scales down. This only considers Ready pods when calculating and reporting. +registered_metrics_total{deprecated_version="",stability_level="ALPHA"} 138 +registered_metrics_total{deprecated_version="",stability_level="BETA"} 4 +registered_metrics_total{deprecated_version="",stability_level="STABLE"} 11 +# HELP replicaset_controller_sorting_deletion_age_ratio [ALPHA] The ratio of chosen deleted pod's ages to the current youngest pod's age (at the time). Should be <2. The intent of this metric is to measure the rough efficacy of the LogarithmicScaleDown feature gate's effect on the sorting (and deletion) of pods when a replicaset scales down. This only considers Ready pods when calculating and reporting. # TYPE replicaset_controller_sorting_deletion_age_ratio histogram replicaset_controller_sorting_deletion_age_ratio_bucket{le="0.25"} 0 replicaset_controller_sorting_deletion_age_ratio_bucket{le="0.5"} 0 @@ -861,260 +963,259 @@ rest_client_exec_plugin_certificate_rotation_age_count 0 rest_client_exec_plugin_ttl_seconds +Inf # HELP rest_client_rate_limiter_duration_seconds [ALPHA] Client side rate limiter latency in seconds. Broken down by verb, and host. 
# TYPE rest_client_rate_limiter_duration_seconds histogram -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.005"} 151 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.025"} 154 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.1"} 190 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.25"} 197 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.5"} 206 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="1"} 208 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="2"} 208 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="4"} 208 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="8"} 208 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="15"} 208 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="30"} 208 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="60"} 208 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 208 -rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="GET"} 7.200475773999999 -rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="GET"} 208 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.005"} 13 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.025"} 13 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.1"} 13 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.25"} 13 
-rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.5"} 13 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="1"} 13 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="2"} 13 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="4"} 13 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="8"} 13 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="15"} 13 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="30"} 13 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="60"} 13 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 13 -rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="PATCH"} 4.9287e-05 -rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="PATCH"} 13 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.005"} 68 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.025"} 69 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.1"} 85 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.25"} 88 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.5"} 94 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="1"} 94 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="2"} 94 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="4"} 94 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="8"} 94 
-rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="15"} 94 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="30"} 94 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="60"} 94 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 94 -rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="POST"} 3.542371532000001 -rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="POST"} 94 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.005"} 90 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.025"} 90 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.1"} 90 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.25"} 90 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.5"} 90 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="1"} 90 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="2"} 90 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="4"} 90 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="8"} 90 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="15"} 90 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="30"} 90 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="60"} 90 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 90 -rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="PUT"} 0.000247974 
-rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="PUT"} 90 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.005"} 1487 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.025"} 1489 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.1"} 1528 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.25"} 1533 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.5"} 1544 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="1"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="2"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="4"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="8"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="15"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="30"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="60"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1546 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="GET"} 7.7935272619999925 +rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="GET"} 1546 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.005"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.025"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.1"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.25"} 10 
+rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.5"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="1"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="2"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="4"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="8"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="15"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="30"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="60"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 10 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="PATCH"} 5.7598e-05 +rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="PATCH"} 10 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.005"} 57 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.025"} 57 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.1"} 74 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.25"} 75 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.5"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="1"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="2"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="4"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="8"} 84 
+rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="15"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="30"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="60"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 84 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="POST"} 4.6114257960000025 +rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="POST"} 84 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.005"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.025"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.1"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.25"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.5"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="1"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="2"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="4"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="8"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="15"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="30"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="60"} 1128 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1128 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="PUT"} 0.0029605479999999973 
+rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="PUT"} 1128 # HELP rest_client_request_duration_seconds [ALPHA] Request latency in seconds. Broken down by verb, and host. # TYPE rest_client_request_duration_seconds histogram -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.005"} 108 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.025"} 151 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.1"} 188 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.25"} 196 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.5"} 205 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="1"} 207 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="2"} 207 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="4"} 208 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="8"} 208 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="15"} 208 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="30"} 208 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="60"} 208 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 208 -rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="GET"} 10.876092844000002 -rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="GET"} 208 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.005"} 1414 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.025"} 1482 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.1"} 1523 
+rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.25"} 1531 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.5"} 1543 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="1"} 1545 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="2"} 1545 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="4"} 1546 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="8"} 1546 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="15"} 1546 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="30"} 1546 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="60"} 1546 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1546 +rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="GET"} 16.967002752999996 +rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="GET"} 1546 rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.005"} 0 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.025"} 9 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.1"} 9 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.25"} 9 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.5"} 10 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="1"} 13 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="2"} 13 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="4"} 13 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="8"} 13 
-rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="15"} 13 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="30"} 13 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="60"} 13 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 13 -rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="PATCH"} 1.9169451439999996 -rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="PATCH"} 13 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.005"} 14 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.025"} 57 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.1"} 75 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.25"} 78 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.5"} 87 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="1"} 94 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="2"} 94 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="4"} 94 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="8"} 94 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="15"} 94 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="30"} 94 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="60"} 94 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 94 -rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="POST"} 9.382712604000002 -rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="POST"} 94 
-rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.005"} 11 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.025"} 86 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.1"} 89 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.25"} 89 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.5"} 89 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="1"} 90 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="2"} 90 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="4"} 90 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="8"} 90 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="15"} 90 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="30"} 90 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="60"} 90 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 90 -rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="PUT"} 1.593709599 -rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="PUT"} 90 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.025"} 4 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.1"} 6 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.25"} 6 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.5"} 6 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="1"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="2"} 10 
+rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="4"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="8"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="15"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="30"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="60"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 10 +rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="PATCH"} 3.209190288 +rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="PATCH"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.005"} 10 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.025"} 46 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.1"} 64 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.25"} 66 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.5"} 77 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="1"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="2"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="4"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="8"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="15"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="30"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="60"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 84 
+rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="POST"} 11.851414454 +rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="POST"} 84 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.005"} 1 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.025"} 1114 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.1"} 1125 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.25"} 1127 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.5"} 1127 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="1"} 1128 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="2"} 1128 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="4"} 1128 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="8"} 1128 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="15"} 1128 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="30"} 1128 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="60"} 1128 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1128 +rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="PUT"} 15.549394524000004 +rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="PUT"} 1128 # HELP rest_client_request_size_bytes [ALPHA] Request size in bytes. Broken down by verb and host. 
# TYPE rest_client_request_size_bytes histogram -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="64"} 208 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="256"} 208 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="512"} 208 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1024"} 208 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4096"} 208 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="16384"} 208 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="65536"} 208 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="262144"} 208 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.048576e+06"} 208 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4.194304e+06"} 208 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.6777216e+07"} 208 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 208 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="64"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="256"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="512"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1024"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4096"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="16384"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="65536"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="262144"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.048576e+06"} 1546 
+rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4.194304e+06"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.6777216e+07"} 1546 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1546 rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="GET"} 0 -rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="GET"} 208 +rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="GET"} 1546 rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="64"} 1 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="256"} 6 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="512"} 7 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1024"} 7 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4096"} 13 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="16384"} 13 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="65536"} 13 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="262144"} 13 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.048576e+06"} 13 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4.194304e+06"} 13 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.6777216e+07"} 13 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 13 -rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="PATCH"} 17446 -rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="PATCH"} 13 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="256"} 3 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="512"} 4 
+rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1024"} 4 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4096"} 10 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="16384"} 10 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="65536"} 10 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="262144"} 10 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.048576e+06"} 10 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4.194304e+06"} 10 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.6777216e+07"} 10 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 10 +rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="PATCH"} 18531 +rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="PATCH"} 10 rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="64"} 1 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="256"} 53 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="512"} 70 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1024"} 74 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4096"} 93 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="16384"} 94 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="65536"} 94 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="262144"} 94 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.048576e+06"} 94 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4.194304e+06"} 94 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.6777216e+07"} 94 
-rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 94 -rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="POST"} 53563 -rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="POST"} 94 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="256"} 55 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="512"} 66 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1024"} 70 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4096"} 84 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="16384"} 84 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="65536"} 84 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="262144"} 84 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.048576e+06"} 84 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4.194304e+06"} 84 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.6777216e+07"} 84 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 84 +rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="POST"} 34927 +rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="POST"} 84 rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="64"} 0 rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="256"} 0 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="512"} 46 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1024"} 51 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4096"} 80 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="16384"} 90 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="65536"} 90 
-rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="262144"} 90 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.048576e+06"} 90 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4.194304e+06"} 90 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.6777216e+07"} 90 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 90 -rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="PUT"} 175260 -rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="PUT"} 90 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="512"} 1099 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1024"} 1104 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4096"} 1128 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="16384"} 1128 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="65536"} 1128 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="262144"} 1128 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.048576e+06"} 1128 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4.194304e+06"} 1128 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.6777216e+07"} 1128 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1128 +rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="PUT"} 534678 +rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="PUT"} 1128 # HELP rest_client_requests_total [ALPHA] Number of HTTP requests, partitioned by status code, method, and host. 
# TYPE rest_client_requests_total counter -rest_client_requests_total{code="200",host="172.18.0.2:6443",method="GET"} 220 -rest_client_requests_total{code="200",host="172.18.0.2:6443",method="PATCH"} 13 -rest_client_requests_total{code="200",host="172.18.0.2:6443",method="PUT"} 85 -rest_client_requests_total{code="201",host="172.18.0.2:6443",method="POST"} 90 +rest_client_requests_total{code="200",host="172.18.0.2:6443",method="GET"} 1756 +rest_client_requests_total{code="200",host="172.18.0.2:6443",method="PATCH"} 10 +rest_client_requests_total{code="200",host="172.18.0.2:6443",method="PUT"} 1125 +rest_client_requests_total{code="201",host="172.18.0.2:6443",method="POST"} 84 rest_client_requests_total{code="403",host="172.18.0.2:6443",method="GET"} 1 -rest_client_requests_total{code="403",host="172.18.0.2:6443",method="POST"} 4 -rest_client_requests_total{code="404",host="172.18.0.2:6443",method="GET"} 33 -rest_client_requests_total{code="409",host="172.18.0.2:6443",method="PUT"} 5 +rest_client_requests_total{code="404",host="172.18.0.2:6443",method="GET"} 34 +rest_client_requests_total{code="409",host="172.18.0.2:6443",method="PUT"} 3 # HELP rest_client_response_size_bytes [ALPHA] Response size in bytes. Broken down by verb and host. 
# TYPE rest_client_response_size_bytes histogram rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="64"} 21 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="256"} 85 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="512"} 166 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1024"} 169 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4096"} 177 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="16384"} 205 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="65536"} 208 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="262144"} 208 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.048576e+06"} 208 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4.194304e+06"} 208 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.6777216e+07"} 208 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 208 -rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="GET"} 452159 -rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="GET"} 208 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="256"} 88 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="512"} 1223 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1024"} 1226 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4096"} 1233 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="16384"} 1543 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="65536"} 1546 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="262144"} 1546 
+rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.048576e+06"} 1546 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4.194304e+06"} 1546 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.6777216e+07"} 1546 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1546 +rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="GET"} 4.16515e+06 +rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="GET"} 1546 rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="64"} 0 rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="256"} 0 rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="512"} 0 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1024"} 4 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4096"} 13 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="16384"} 13 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="65536"} 13 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="262144"} 13 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.048576e+06"} 13 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4.194304e+06"} 13 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.6777216e+07"} 13 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 13 -rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="PATCH"} 30011 -rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="PATCH"} 13 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1024"} 1 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4096"} 10 
+rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="16384"} 10 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="65536"} 10 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="262144"} 10 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.048576e+06"} 10 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4.194304e+06"} 10 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.6777216e+07"} 10 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 10 +rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="PATCH"} 28839 +rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="PATCH"} 10 rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="64"} 0 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="256"} 41 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="512"} 42 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1024"} 60 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4096"} 90 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="16384"} 94 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="65536"} 94 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="262144"} 94 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.048576e+06"} 94 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4.194304e+06"} 94 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.6777216e+07"} 94 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 94 -rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="POST"} 94345 
-rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="POST"} 94 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="256"} 38 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="512"} 39 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1024"} 51 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4096"} 82 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="16384"} 84 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="65536"} 84 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="262144"} 84 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.048576e+06"} 84 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4.194304e+06"} 84 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.6777216e+07"} 84 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 84 +rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="POST"} 78889 +rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="POST"} 84 rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="64"} 0 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="256"} 2 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="512"} 51 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1024"} 55 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4096"} 78 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="16384"} 90 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="65536"} 90 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="262144"} 90 
-rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.048576e+06"} 90 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4.194304e+06"} 90 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.6777216e+07"} 90 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 90 -rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="PUT"} 164099 -rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="PUT"} 90 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="256"} 1 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="512"} 1101 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1024"} 1103 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4096"} 1123 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="16384"} 1128 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="65536"} 1128 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="262144"} 1128 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.048576e+06"} 1128 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4.194304e+06"} 1128 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.6777216e+07"} 1128 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1128 +rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="PUT"} 578751 +rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="PUT"} 1128 # HELP rest_client_transport_cache_entries [ALPHA] Number of transport entries in the internal cache. 
# TYPE rest_client_transport_cache_entries gauge rest_client_transport_cache_entries 3 # HELP rest_client_transport_create_calls_total [ALPHA] Number of calls to get a new transport, partitioned by the result of the operation hit: obtained from the cache, miss: created and added to the cache, uncacheable: created and not cached # TYPE rest_client_transport_create_calls_total counter -rest_client_transport_create_calls_total{result="hit"} 51 +rest_client_transport_create_calls_total{result="hit"} 52 rest_client_transport_create_calls_total{result="miss"} 3 # HELP retroactive_storageclass_errors_total [ALPHA] Total number of failed retroactive StorageClass assignments to persistent volume claim # TYPE retroactive_storageclass_errors_total counter @@ -1127,8 +1228,8 @@ retroactive_storageclass_total 0 root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.001"} 0 root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.002"} 0 root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.004"} 0 -root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.008"} 1 -root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.016"} 3 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.008"} 0 +root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.016"} 4 root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.032"} 4 root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.064"} 4 root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="0.128"} 4 @@ -1140,7 +1241,7 @@ root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="4.096"} 5 root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="8.192"} 5 root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="16.384"} 5 root_ca_cert_publisher_sync_duration_seconds_bucket{code="200",le="+Inf"} 5 -root_ca_cert_publisher_sync_duration_seconds_sum{code="200"} 0.7634069600000001 
+root_ca_cert_publisher_sync_duration_seconds_sum{code="200"} 0.7155615430000001 root_ca_cert_publisher_sync_duration_seconds_count{code="200"} 5 # HELP root_ca_cert_publisher_sync_total [ALPHA] Number of namespace syncs happened in root ca cert publisher. # TYPE root_ca_cert_publisher_sync_total counter @@ -1194,6 +1295,26 @@ service_controller_update_loadbalancer_host_latency_seconds_bucket{le="16384"} 0 service_controller_update_loadbalancer_host_latency_seconds_bucket{le="+Inf"} 0 service_controller_update_loadbalancer_host_latency_seconds_sum 0 service_controller_update_loadbalancer_host_latency_seconds_count 0 +# HELP taint_eviction_controller_pod_deletion_duration_seconds [ALPHA] Latency, in seconds, between the time when a taint effect has been activated for the Pod and its deletion via TaintEvictionController. +# TYPE taint_eviction_controller_pod_deletion_duration_seconds histogram +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="0.005"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="0.025"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="0.1"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="0.5"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="1"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="2.5"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="10"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="30"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="60"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="120"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="180"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="240"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_bucket{le="+Inf"} 0 +taint_eviction_controller_pod_deletion_duration_seconds_sum 0 
+taint_eviction_controller_pod_deletion_duration_seconds_count 0 +# HELP taint_eviction_controller_pod_deletions_total [ALPHA] Total number of Pods deleted by TaintEvictionController since its start. +# TYPE taint_eviction_controller_pod_deletions_total counter +taint_eviction_controller_pod_deletions_total 0 # HELP ttl_after_finished_controller_job_deletion_duration_seconds [ALPHA] The time it took to delete the job since it became eligible for deletion # TYPE ttl_after_finished_controller_job_deletion_duration_seconds histogram ttl_after_finished_controller_job_deletion_duration_seconds_bucket{le="0.1"} 0 @@ -1215,40 +1336,40 @@ ttl_after_finished_controller_job_deletion_duration_seconds_sum 0 ttl_after_finished_controller_job_deletion_duration_seconds_count 0 # HELP workqueue_adds_total [ALPHA] Total number of adds handled by workqueue # TYPE workqueue_adds_total counter -workqueue_adds_total{name="ClusterRoleAggregator"} 22 -workqueue_adds_total{name="DynamicCABundle-client-ca-bundle"} 1 -workqueue_adds_total{name="DynamicCABundle-csr-controller"} 7 -workqueue_adds_total{name="DynamicCABundle-request-header"} 1 -workqueue_adds_total{name="DynamicServingCertificateController"} 2 +workqueue_adds_total{name="ClusterRoleAggregator"} 19 +workqueue_adds_total{name="DynamicCABundle-client-ca-bundle"} 2 +workqueue_adds_total{name="DynamicCABundle-csr-controller"} 8 +workqueue_adds_total{name="DynamicCABundle-request-header"} 2 +workqueue_adds_total{name="DynamicServingCertificateController"} 38 workqueue_adds_total{name="bootstrap_signer_queue"} 2 workqueue_adds_total{name="certificate"} 0 workqueue_adds_total{name="claims"} 0 workqueue_adds_total{name="cronjob"} 0 -workqueue_adds_total{name="daemonset"} 25 -workqueue_adds_total{name="deployment"} 21 +workqueue_adds_total{name="daemonset"} 17 +workqueue_adds_total{name="deployment"} 20 workqueue_adds_total{name="disruption"} 0 workqueue_adds_total{name="disruption_recheck"} 0 -workqueue_adds_total{name="endpoint"} 8 
-workqueue_adds_total{name="endpoint_slice"} 10 -workqueue_adds_total{name="endpoint_slice_mirroring"} 6 +workqueue_adds_total{name="endpoint"} 6 +workqueue_adds_total{name="endpoint_slice"} 7 +workqueue_adds_total{name="endpoint_slice_mirroring"} 4 workqueue_adds_total{name="ephemeral_volume"} 0 -workqueue_adds_total{name="garbage_collector_attempt_to_delete"} 0 +workqueue_adds_total{name="garbage_collector_attempt_to_delete"} 1 workqueue_adds_total{name="garbage_collector_attempt_to_orphan"} 0 -workqueue_adds_total{name="garbage_collector_graph_changes"} 473 +workqueue_adds_total{name="garbage_collector_graph_changes"} 2979 workqueue_adds_total{name="horizontalpodautoscaler"} 0 workqueue_adds_total{name="job"} 0 workqueue_adds_total{name="job_orphan_pod"} 0 workqueue_adds_total{name="namespace"} 0 workqueue_adds_total{name="node"} 1 -workqueue_adds_total{name="node_lifecycle_controller"} 4 -workqueue_adds_total{name="node_lifecycle_controller_pods"} 11 +workqueue_adds_total{name="node_lifecycle_controller"} 12 +workqueue_adds_total{name="node_lifecycle_controller_pods"} 10 workqueue_adds_total{name="noexec_taint_node"} 1 -workqueue_adds_total{name="noexec_taint_pod"} 18 +workqueue_adds_total{name="noexec_taint_pod"} 16 workqueue_adds_total{name="orphaned_pods_nodes"} 0 workqueue_adds_total{name="pvcprotection"} 0 workqueue_adds_total{name="pvcs"} 0 workqueue_adds_total{name="pvprotection"} 0 -workqueue_adds_total{name="replicaset"} 23 +workqueue_adds_total{name="replicaset"} 18 workqueue_adds_total{name="replicationmanager"} 0 workqueue_adds_total{name="resource_quota_controller_resource_changes"} 0 workqueue_adds_total{name="resourcequota_primary"} 0 @@ -1262,7 +1383,7 @@ workqueue_adds_total{name="stale_pod_disruption"} 0 workqueue_adds_total{name="statefulset"} 0 workqueue_adds_total{name="token_cleaner"} 1 workqueue_adds_total{name="ttl_jobs_to_delete"} 0 -workqueue_adds_total{name="ttlcontroller"} 4 +workqueue_adds_total{name="ttlcontroller"} 12 
workqueue_adds_total{name="volume_expand"} 0 workqueue_adds_total{name="volumes"} 0 # HELP workqueue_depth [ALPHA] Current depth of workqueue @@ -1374,68 +1495,68 @@ workqueue_longest_running_processor_seconds{name="volumes"} 0 workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="1e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="9.999999999999999e-06"} 3 -workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="9.999999999999999e-05"} 10 -workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.001"} 14 -workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.01"} 18 -workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.1"} 22 -workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="1"} 22 -workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="10"} 22 -workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="+Inf"} 22 -workqueue_queue_duration_seconds_sum{name="ClusterRoleAggregator"} 0.296991747 -workqueue_queue_duration_seconds_count{name="ClusterRoleAggregator"} 22 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="9.999999999999999e-06"} 1 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="9.999999999999999e-05"} 7 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.001"} 11 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.01"} 12 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.1"} 19 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="1"} 19 +workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="10"} 19 
+workqueue_queue_duration_seconds_bucket{name="ClusterRoleAggregator",le="+Inf"} 19 +workqueue_queue_duration_seconds_sum{name="ClusterRoleAggregator"} 0.29662182300000006 +workqueue_queue_duration_seconds_count{name="ClusterRoleAggregator"} 19 workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1e-06"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="9.999999999999999e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="9.999999999999999e-05"} 1 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.001"} 1 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.01"} 1 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.1"} 1 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1"} 1 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="10"} 1 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="+Inf"} 1 -workqueue_queue_duration_seconds_sum{name="DynamicCABundle-client-ca-bundle"} 9.5473e-05 -workqueue_queue_duration_seconds_count{name="DynamicCABundle-client-ca-bundle"} 1 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="9.999999999999999e-05"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.001"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.01"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.1"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1"} 2 
+workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="10"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="+Inf"} 2 +workqueue_queue_duration_seconds_sum{name="DynamicCABundle-client-ca-bundle"} 0.00010036499999999999 +workqueue_queue_duration_seconds_count{name="DynamicCABundle-client-ca-bundle"} 2 workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1e-06"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="9.999999999999999e-06"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="9.999999999999999e-05"} 4 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.001"} 7 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.01"} 7 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.1"} 7 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1"} 7 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="10"} 7 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="+Inf"} 7 -workqueue_queue_duration_seconds_sum{name="DynamicCABundle-csr-controller"} 0.0026557580000000007 -workqueue_queue_duration_seconds_count{name="DynamicCABundle-csr-controller"} 7 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.001"} 5 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.01"} 8 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.1"} 8 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1"} 8 
+workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="10"} 8 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="+Inf"} 8 +workqueue_queue_duration_seconds_sum{name="DynamicCABundle-csr-controller"} 0.005034077 +workqueue_queue_duration_seconds_count{name="DynamicCABundle-csr-controller"} 8 workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1e-06"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="9.999999999999999e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="9.999999999999999e-05"} 0 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.001"} 1 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.01"} 1 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.1"} 1 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1"} 1 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="10"} 1 -workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="+Inf"} 1 -workqueue_queue_duration_seconds_sum{name="DynamicCABundle-request-header"} 0.000169111 -workqueue_queue_duration_seconds_count{name="DynamicCABundle-request-header"} 1 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="9.999999999999999e-05"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.001"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.01"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.1"} 2 
+workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="10"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicCABundle-request-header",le="+Inf"} 2 +workqueue_queue_duration_seconds_sum{name="DynamicCABundle-request-header"} 0.00012161999999999999 +workqueue_queue_duration_seconds_count{name="DynamicCABundle-request-header"} 2 workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-06"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-05"} 2 -workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.001"} 2 -workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.01"} 2 -workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.1"} 2 -workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1"} 2 -workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="10"} 2 -workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="+Inf"} 2 -workqueue_queue_duration_seconds_sum{name="DynamicServingCertificateController"} 3.6030000000000006e-05 -workqueue_queue_duration_seconds_count{name="DynamicServingCertificateController"} 2 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-05"} 37 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.001"} 38 
+workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.01"} 38 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.1"} 38 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1"} 38 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="10"} 38 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="+Inf"} 38 +workqueue_queue_duration_seconds_sum{name="DynamicServingCertificateController"} 0.0008584600000000002 +workqueue_queue_duration_seconds_count{name="DynamicServingCertificateController"} 38 workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="1e-06"} 0 @@ -1444,10 +1565,10 @@ workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="9.9999 workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="0.001"} 1 workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="0.01"} 1 workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="0.1"} 1 -workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="1"} 2 -workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="10"} 2 +workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="1"} 1 +workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="10"} 1 workqueue_queue_duration_seconds_bucket{name="bootstrap_signer_queue",le="+Inf"} 2 -workqueue_queue_duration_seconds_sum{name="bootstrap_signer_queue"} 0.9013261469999999 +workqueue_queue_duration_seconds_sum{name="bootstrap_signer_queue"} 11.600700546 workqueue_queue_duration_seconds_count{name="bootstrap_signer_queue"} 2 
workqueue_queue_duration_seconds_bucket{name="certificate",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="certificate",le="1e-07"} 0 @@ -1491,29 +1612,29 @@ workqueue_queue_duration_seconds_count{name="cronjob"} 0 workqueue_queue_duration_seconds_bucket{name="daemonset",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="daemonset",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="daemonset",le="1e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="daemonset",le="9.999999999999999e-06"} 11 -workqueue_queue_duration_seconds_bucket{name="daemonset",le="9.999999999999999e-05"} 13 -workqueue_queue_duration_seconds_bucket{name="daemonset",le="0.001"} 17 -workqueue_queue_duration_seconds_bucket{name="daemonset",le="0.01"} 19 -workqueue_queue_duration_seconds_bucket{name="daemonset",le="0.1"} 23 -workqueue_queue_duration_seconds_bucket{name="daemonset",le="1"} 25 -workqueue_queue_duration_seconds_bucket{name="daemonset",le="10"} 25 -workqueue_queue_duration_seconds_bucket{name="daemonset",le="+Inf"} 25 -workqueue_queue_duration_seconds_sum{name="daemonset"} 0.3524839229999999 -workqueue_queue_duration_seconds_count{name="daemonset"} 25 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="9.999999999999999e-06"} 8 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="9.999999999999999e-05"} 8 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="0.001"} 12 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="0.01"} 12 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="0.1"} 17 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="1"} 17 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="10"} 17 +workqueue_queue_duration_seconds_bucket{name="daemonset",le="+Inf"} 17 +workqueue_queue_duration_seconds_sum{name="daemonset"} 0.22901414399999995 +workqueue_queue_duration_seconds_count{name="daemonset"} 17 workqueue_queue_duration_seconds_bucket{name="deployment",le="1e-08"} 0 
workqueue_queue_duration_seconds_bucket{name="deployment",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="deployment",le="1e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="deployment",le="9.999999999999999e-06"} 7 -workqueue_queue_duration_seconds_bucket{name="deployment",le="9.999999999999999e-05"} 9 -workqueue_queue_duration_seconds_bucket{name="deployment",le="0.001"} 13 -workqueue_queue_duration_seconds_bucket{name="deployment",le="0.01"} 15 -workqueue_queue_duration_seconds_bucket{name="deployment",le="0.1"} 21 -workqueue_queue_duration_seconds_bucket{name="deployment",le="1"} 21 -workqueue_queue_duration_seconds_bucket{name="deployment",le="10"} 21 -workqueue_queue_duration_seconds_bucket{name="deployment",le="+Inf"} 21 -workqueue_queue_duration_seconds_sum{name="deployment"} 0.13473920699999997 -workqueue_queue_duration_seconds_count{name="deployment"} 21 +workqueue_queue_duration_seconds_bucket{name="deployment",le="9.999999999999999e-06"} 5 +workqueue_queue_duration_seconds_bucket{name="deployment",le="9.999999999999999e-05"} 12 +workqueue_queue_duration_seconds_bucket{name="deployment",le="0.001"} 14 +workqueue_queue_duration_seconds_bucket{name="deployment",le="0.01"} 14 +workqueue_queue_duration_seconds_bucket{name="deployment",le="0.1"} 20 +workqueue_queue_duration_seconds_bucket{name="deployment",le="1"} 20 +workqueue_queue_duration_seconds_bucket{name="deployment",le="10"} 20 +workqueue_queue_duration_seconds_bucket{name="deployment",le="+Inf"} 20 +workqueue_queue_duration_seconds_sum{name="deployment"} 0.160956561 +workqueue_queue_duration_seconds_count{name="deployment"} 20 workqueue_queue_duration_seconds_bucket{name="disruption",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="disruption",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="disruption",le="1e-06"} 0 @@ -1543,42 +1664,42 @@ workqueue_queue_duration_seconds_count{name="disruption_recheck"} 0 
workqueue_queue_duration_seconds_bucket{name="endpoint",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="endpoint",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="endpoint",le="1e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="endpoint",le="9.999999999999999e-06"} 5 -workqueue_queue_duration_seconds_bucket{name="endpoint",le="9.999999999999999e-05"} 5 -workqueue_queue_duration_seconds_bucket{name="endpoint",le="0.001"} 5 -workqueue_queue_duration_seconds_bucket{name="endpoint",le="0.01"} 5 -workqueue_queue_duration_seconds_bucket{name="endpoint",le="0.1"} 7 -workqueue_queue_duration_seconds_bucket{name="endpoint",le="1"} 8 -workqueue_queue_duration_seconds_bucket{name="endpoint",le="10"} 8 -workqueue_queue_duration_seconds_bucket{name="endpoint",le="+Inf"} 8 -workqueue_queue_duration_seconds_sum{name="endpoint"} 0.329293839 -workqueue_queue_duration_seconds_count{name="endpoint"} 8 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="9.999999999999999e-06"} 4 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="9.999999999999999e-05"} 4 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="0.001"} 4 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="0.01"} 4 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="0.1"} 6 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="1"} 6 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="10"} 6 +workqueue_queue_duration_seconds_bucket{name="endpoint",le="+Inf"} 6 +workqueue_queue_duration_seconds_sum{name="endpoint"} 0.16378205300000004 +workqueue_queue_duration_seconds_count{name="endpoint"} 6 workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="1e-06"} 0 workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="9.999999999999999e-06"} 4 
-workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="9.999999999999999e-05"} 7 -workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="0.001"} 7 -workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="0.01"} 7 -workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="0.1"} 9 -workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="1"} 10 -workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="10"} 10 -workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="+Inf"} 10 -workqueue_queue_duration_seconds_sum{name="endpoint_slice"} 0.23840660900000002 -workqueue_queue_duration_seconds_count{name="endpoint_slice"} 10 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="9.999999999999999e-05"} 5 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="0.001"} 5 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="0.01"} 5 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="0.1"} 5 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="1"} 7 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="10"} 7 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice",le="+Inf"} 7 +workqueue_queue_duration_seconds_sum{name="endpoint_slice"} 0.359466593 +workqueue_queue_duration_seconds_count{name="endpoint_slice"} 7 workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="9.999999999999999e-06"} 3 -workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="9.999999999999999e-05"} 5 -workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.001"} 5 
-workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.01"} 5 -workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.1"} 6 -workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1"} 6 -workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="10"} 6 -workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="+Inf"} 6 -workqueue_queue_duration_seconds_sum{name="endpoint_slice_mirroring"} 0.08032317800000001 -workqueue_queue_duration_seconds_count{name="endpoint_slice_mirroring"} 6 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="9.999999999999999e-06"} 2 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="9.999999999999999e-05"} 3 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.001"} 3 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.01"} 3 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.1"} 3 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1"} 4 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="10"} 4 +workqueue_queue_duration_seconds_bucket{name="endpoint_slice_mirroring",le="+Inf"} 4 +workqueue_queue_duration_seconds_sum{name="endpoint_slice_mirroring"} 0.100958319 +workqueue_queue_duration_seconds_count{name="endpoint_slice_mirroring"} 4 workqueue_queue_duration_seconds_bucket{name="ephemeral_volume",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="ephemeral_volume",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="ephemeral_volume",le="1e-06"} 0 @@ -1600,11 +1721,11 @@ workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delet workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="0.001"} 0 workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="0.01"} 0 
workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="0.1"} 0 -workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="1"} 0 -workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="10"} 0 -workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="+Inf"} 0 -workqueue_queue_duration_seconds_sum{name="garbage_collector_attempt_to_delete"} 0 -workqueue_queue_duration_seconds_count{name="garbage_collector_attempt_to_delete"} 0 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="1"} 1 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="10"} 1 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="+Inf"} 1 +workqueue_queue_duration_seconds_sum{name="garbage_collector_attempt_to_delete"} 0.636190164 +workqueue_queue_duration_seconds_count{name="garbage_collector_attempt_to_delete"} 1 workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="1e-06"} 0 @@ -1621,16 +1742,16 @@ workqueue_queue_duration_seconds_count{name="garbage_collector_attempt_to_orphan workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="9.999999999999999e-06"} 236 -workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="9.999999999999999e-05"} 361 
-workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.001"} 473 -workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.01"} 473 -workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.1"} 473 -workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1"} 473 -workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="10"} 473 -workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="+Inf"} 473 -workqueue_queue_duration_seconds_sum{name="garbage_collector_graph_changes"} 0.03026248499999999 -workqueue_queue_duration_seconds_count{name="garbage_collector_graph_changes"} 473 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="9.999999999999999e-06"} 2629 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="9.999999999999999e-05"} 2927 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.001"} 2979 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.01"} 2979 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.1"} 2979 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1"} 2979 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="10"} 2979 +workqueue_queue_duration_seconds_bucket{name="garbage_collector_graph_changes",le="+Inf"} 2979 +workqueue_queue_duration_seconds_sum{name="garbage_collector_graph_changes"} 0.032251662000000084 +workqueue_queue_duration_seconds_count{name="garbage_collector_graph_changes"} 2979 workqueue_queue_duration_seconds_bucket{name="horizontalpodautoscaler",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="horizontalpodautoscaler",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="horizontalpodautoscaler",le="1e-06"} 0 @@ -1699,29 
+1820,29 @@ workqueue_queue_duration_seconds_count{name="node"} 0 workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="1e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="9.999999999999999e-06"} 1 -workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="9.999999999999999e-05"} 2 -workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="0.001"} 3 -workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="0.01"} 3 -workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="0.1"} 4 -workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="1"} 4 -workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="10"} 4 -workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="+Inf"} 4 -workqueue_queue_duration_seconds_sum{name="node_lifecycle_controller"} 0.070519022 -workqueue_queue_duration_seconds_count{name="node_lifecycle_controller"} 4 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="9.999999999999999e-06"} 9 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="9.999999999999999e-05"} 10 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="0.001"} 11 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="0.01"} 11 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="0.1"} 12 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="1"} 12 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="10"} 12 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller",le="+Inf"} 12 
+workqueue_queue_duration_seconds_sum{name="node_lifecycle_controller"} 0.09350184499999999 +workqueue_queue_duration_seconds_count{name="node_lifecycle_controller"} 12 workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="9.999999999999999e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="9.999999999999999e-05"} 4 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="9.999999999999999e-06"} 4 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="9.999999999999999e-05"} 6 workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.001"} 6 -workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.01"} 7 -workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.1"} 11 -workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1"} 11 -workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="10"} 11 -workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="+Inf"} 11 -workqueue_queue_duration_seconds_sum{name="node_lifecycle_controller_pods"} 0.28271710400000005 -workqueue_queue_duration_seconds_count{name="node_lifecycle_controller_pods"} 11 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.01"} 6 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.1"} 10 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1"} 10 +workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="10"} 10 
+workqueue_queue_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="+Inf"} 10 +workqueue_queue_duration_seconds_sum{name="node_lifecycle_controller_pods"} 0.374331337 +workqueue_queue_duration_seconds_count{name="node_lifecycle_controller_pods"} 10 workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="1e-06"} 0 @@ -1733,21 +1854,21 @@ workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="0.1"} 1 workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="1"} 1 workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="10"} 1 workqueue_queue_duration_seconds_bucket{name="noexec_taint_node",le="+Inf"} 1 -workqueue_queue_duration_seconds_sum{name="noexec_taint_node"} 0.070277014 +workqueue_queue_duration_seconds_sum{name="noexec_taint_node"} 0.068335756 workqueue_queue_duration_seconds_count{name="noexec_taint_node"} 1 workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="1e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="9.999999999999999e-06"} 14 -workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="9.999999999999999e-05"} 14 -workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="0.001"} 14 -workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="0.01"} 14 -workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="0.1"} 18 -workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="1"} 18 -workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="10"} 18 -workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="+Inf"} 18 
-workqueue_queue_duration_seconds_sum{name="noexec_taint_pod"} 0.27898988700000005 -workqueue_queue_duration_seconds_count{name="noexec_taint_pod"} 18 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="9.999999999999999e-06"} 9 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="9.999999999999999e-05"} 12 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="0.001"} 12 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="0.01"} 12 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="0.1"} 16 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="1"} 16 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="10"} 16 +workqueue_queue_duration_seconds_bucket{name="noexec_taint_pod",le="+Inf"} 16 +workqueue_queue_duration_seconds_sum{name="noexec_taint_pod"} 0.275766187 +workqueue_queue_duration_seconds_count{name="noexec_taint_pod"} 16 workqueue_queue_duration_seconds_bucket{name="orphaned_pods_nodes",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="orphaned_pods_nodes",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="orphaned_pods_nodes",le="1e-06"} 0 @@ -1803,16 +1924,16 @@ workqueue_queue_duration_seconds_count{name="pvprotection"} 0 workqueue_queue_duration_seconds_bucket{name="replicaset",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="replicaset",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="replicaset",le="1e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="replicaset",le="9.999999999999999e-06"} 15 -workqueue_queue_duration_seconds_bucket{name="replicaset",le="9.999999999999999e-05"} 17 -workqueue_queue_duration_seconds_bucket{name="replicaset",le="0.001"} 19 -workqueue_queue_duration_seconds_bucket{name="replicaset",le="0.01"} 21 -workqueue_queue_duration_seconds_bucket{name="replicaset",le="0.1"} 23 -workqueue_queue_duration_seconds_bucket{name="replicaset",le="1"} 23 
-workqueue_queue_duration_seconds_bucket{name="replicaset",le="10"} 23 -workqueue_queue_duration_seconds_bucket{name="replicaset",le="+Inf"} 23 -workqueue_queue_duration_seconds_sum{name="replicaset"} 0.04710115100000001 -workqueue_queue_duration_seconds_count{name="replicaset"} 23 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="9.999999999999999e-06"} 9 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="9.999999999999999e-05"} 12 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="0.001"} 14 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="0.01"} 14 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="0.1"} 18 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="1"} 18 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="10"} 18 +workqueue_queue_duration_seconds_bucket{name="replicaset",le="+Inf"} 18 +workqueue_queue_duration_seconds_sum{name="replicaset"} 0.16475134600000005 +workqueue_queue_duration_seconds_count{name="replicaset"} 18 workqueue_queue_duration_seconds_bucket{name="replicationmanager",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="replicationmanager",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="replicationmanager",le="1e-06"} 0 @@ -1872,11 +1993,11 @@ workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="9.9999 workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="9.999999999999999e-05"} 0 workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="0.001"} 0 workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="0.01"} 0 -workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="0.1"} 1 workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="1"} 5 workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="10"} 5 
workqueue_queue_duration_seconds_bucket{name="root_ca_cert_publisher",le="+Inf"} 5 -workqueue_queue_duration_seconds_sum{name="root_ca_cert_publisher"} 4.05847741 +workqueue_queue_duration_seconds_sum{name="root_ca_cert_publisher"} 2.900853618 workqueue_queue_duration_seconds_count{name="root_ca_cert_publisher"} 5 workqueue_queue_duration_seconds_bucket{name="service",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="service",le="1e-07"} 0 @@ -1897,12 +2018,12 @@ workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="1e-06"} 0 workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="9.999999999999999e-06"} 0 workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="9.999999999999999e-05"} 0 workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="0.001"} 0 -workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="0.01"} 1 workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="0.1"} 1 workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="1"} 5 workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="10"} 5 workqueue_queue_duration_seconds_bucket{name="serviceaccount",le="+Inf"} 5 -workqueue_queue_duration_seconds_sum{name="serviceaccount"} 1.894219071 +workqueue_queue_duration_seconds_sum{name="serviceaccount"} 0.876180307 workqueue_queue_duration_seconds_count{name="serviceaccount"} 5 workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="1e-07"} 0 @@ -1920,15 +2041,15 @@ workqueue_queue_duration_seconds_count{name="serviceaccount_tokens_secret"} 0 workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1e-07"} 0 
workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="9.999999999999999e-06"} 20 -workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="9.999999999999999e-05"} 34 -workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="0.001"} 34 -workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="0.01"} 35 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="9.999999999999999e-06"} 28 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="9.999999999999999e-05"} 39 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="0.001"} 39 +workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="0.01"} 40 workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="0.1"} 43 workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1"} 43 workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="10"} 43 workqueue_queue_duration_seconds_bucket{name="serviceaccount_tokens_service",le="+Inf"} 43 -workqueue_queue_duration_seconds_sum{name="serviceaccount_tokens_service"} 0.43841727100000005 +workqueue_queue_duration_seconds_sum{name="serviceaccount_tokens_service"} 0.14209613899999995 workqueue_queue_duration_seconds_count{name="serviceaccount_tokens_service"} 43 workqueue_queue_duration_seconds_bucket{name="stale_pod_disruption",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="stale_pod_disruption",le="1e-07"} 0 @@ -1959,15 +2080,15 @@ workqueue_queue_duration_seconds_count{name="statefulset"} 0 workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="1e-06"} 
0 -workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="9.999999999999999e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="9.999999999999999e-06"} 1 +workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="9.999999999999999e-05"} 1 workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="0.001"} 1 workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="0.01"} 1 workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="0.1"} 1 workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="1"} 1 workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="10"} 1 workqueue_queue_duration_seconds_bucket{name="token_cleaner",le="+Inf"} 1 -workqueue_queue_duration_seconds_sum{name="token_cleaner"} 0.000147205 +workqueue_queue_duration_seconds_sum{name="token_cleaner"} 6.897e-06 workqueue_queue_duration_seconds_count{name="token_cleaner"} 1 workqueue_queue_duration_seconds_bucket{name="ttl_jobs_to_delete",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="ttl_jobs_to_delete",le="1e-07"} 0 @@ -1985,16 +2106,16 @@ workqueue_queue_duration_seconds_count{name="ttl_jobs_to_delete"} 0 workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="1e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="9.999999999999999e-06"} 1 -workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="9.999999999999999e-05"} 3 -workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="0.001"} 3 -workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="0.01"} 3 -workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="0.1"} 4 -workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="1"} 4 
-workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="10"} 4 -workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="+Inf"} 4 -workqueue_queue_duration_seconds_sum{name="ttlcontroller"} 0.07179900599999998 -workqueue_queue_duration_seconds_count{name="ttlcontroller"} 4 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="9.999999999999999e-06"} 9 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="9.999999999999999e-05"} 10 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="0.001"} 10 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="0.01"} 10 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="0.1"} 11 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="1"} 12 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="10"} 12 +workqueue_queue_duration_seconds_bucket{name="ttlcontroller",le="+Inf"} 12 +workqueue_queue_duration_seconds_sum{name="ttlcontroller"} 0.776090998 +workqueue_queue_duration_seconds_count{name="ttlcontroller"} 12 workqueue_queue_duration_seconds_bucket{name="volume_expand",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="volume_expand",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="volume_expand",le="1e-06"} 0 @@ -2031,12 +2152,12 @@ workqueue_retries_total{name="DynamicServingCertificateController"} 0 workqueue_retries_total{name="bootstrap_signer_queue"} 0 workqueue_retries_total{name="certificate"} 0 workqueue_retries_total{name="cronjob"} 0 -workqueue_retries_total{name="daemonset"} 4 -workqueue_retries_total{name="deployment"} 12 +workqueue_retries_total{name="daemonset"} 0 +workqueue_retries_total{name="deployment"} 9 workqueue_retries_total{name="disruption"} 0 workqueue_retries_total{name="disruption_recheck"} 0 -workqueue_retries_total{name="endpoint"} 6 -workqueue_retries_total{name="endpoint_slice"} 11 +workqueue_retries_total{name="endpoint"} 4 +workqueue_retries_total{name="endpoint_slice"} 6 
workqueue_retries_total{name="endpoint_slice_mirroring"} 0 workqueue_retries_total{name="ephemeral_volume"} 0 workqueue_retries_total{name="garbage_collector_attempt_to_delete"} 0 @@ -2127,66 +2248,66 @@ workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="1e-07"} workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="1e-06"} 0 workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="9.999999999999999e-06"} 0 workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="9.999999999999999e-05"} 1 -workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.001"} 13 -workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.01"} 16 -workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.1"} 19 -workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="1"} 22 -workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="10"} 22 -workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="+Inf"} 22 -workqueue_work_duration_seconds_sum{name="ClusterRoleAggregator"} 1.608312209 -workqueue_work_duration_seconds_count{name="ClusterRoleAggregator"} 22 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.001"} 12 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.01"} 12 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="0.1"} 16 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="1"} 19 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="10"} 19 +workqueue_work_duration_seconds_bucket{name="ClusterRoleAggregator",le="+Inf"} 19 +workqueue_work_duration_seconds_sum{name="ClusterRoleAggregator"} 2.4599846950000006 +workqueue_work_duration_seconds_count{name="ClusterRoleAggregator"} 19 workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1e-08"} 0 
workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1e-06"} 0 workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="9.999999999999999e-06"} 0 workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="9.999999999999999e-05"} 1 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.001"} 1 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.01"} 1 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.1"} 1 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1"} 1 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="10"} 1 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="+Inf"} 1 -workqueue_work_duration_seconds_sum{name="DynamicCABundle-client-ca-bundle"} 5.416e-05 -workqueue_work_duration_seconds_count{name="DynamicCABundle-client-ca-bundle"} 1 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.001"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.01"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="0.1"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="1"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="10"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-client-ca-bundle",le="+Inf"} 2 +workqueue_work_duration_seconds_sum{name="DynamicCABundle-client-ca-bundle"} 0.000147692 +workqueue_work_duration_seconds_count{name="DynamicCABundle-client-ca-bundle"} 2 workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1e-08"} 0 
workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1e-06"} 0 workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="9.999999999999999e-06"} 0 workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="9.999999999999999e-05"} 0 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.001"} 5 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.01"} 7 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.1"} 7 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1"} 7 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="10"} 7 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="+Inf"} 7 -workqueue_work_duration_seconds_sum{name="DynamicCABundle-csr-controller"} 0.005854245 -workqueue_work_duration_seconds_count{name="DynamicCABundle-csr-controller"} 7 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.001"} 3 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.01"} 8 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="0.1"} 8 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="1"} 8 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="10"} 8 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-csr-controller",le="+Inf"} 8 +workqueue_work_duration_seconds_sum{name="DynamicCABundle-csr-controller"} 0.008532069 +workqueue_work_duration_seconds_count{name="DynamicCABundle-csr-controller"} 8 workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1e-07"} 0 
workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1e-06"} 0 workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="9.999999999999999e-06"} 0 workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="9.999999999999999e-05"} 1 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.001"} 1 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.01"} 1 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.1"} 1 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1"} 1 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="10"} 1 -workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="+Inf"} 1 -workqueue_work_duration_seconds_sum{name="DynamicCABundle-request-header"} 6.7793e-05 -workqueue_work_duration_seconds_count{name="DynamicCABundle-request-header"} 1 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.001"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.01"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="0.1"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="1"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="10"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicCABundle-request-header",le="+Inf"} 2 +workqueue_work_duration_seconds_sum{name="DynamicCABundle-request-header"} 0.000143932 +workqueue_work_duration_seconds_count{name="DynamicCABundle-request-header"} 2 workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-07"} 0 
workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-06"} 0 workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-06"} 0 -workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-05"} 2 -workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.001"} 2 -workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.01"} 2 -workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.1"} 2 -workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1"} 2 -workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="10"} 2 -workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="+Inf"} 2 -workqueue_work_duration_seconds_sum{name="DynamicServingCertificateController"} 9.181099999999999e-05 -workqueue_work_duration_seconds_count{name="DynamicServingCertificateController"} 2 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-05"} 38 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.001"} 38 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.01"} 38 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.1"} 38 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1"} 38 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="10"} 38 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="+Inf"} 38 +workqueue_work_duration_seconds_sum{name="DynamicServingCertificateController"} 0.0014076110000000002 +workqueue_work_duration_seconds_count{name="DynamicServingCertificateController"} 38 
workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="1e-06"} 0 @@ -2198,7 +2319,7 @@ workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="0.1"} 1 workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="1"} 2 workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="10"} 2 workqueue_work_duration_seconds_bucket{name="bootstrap_signer_queue",le="+Inf"} 2 -workqueue_work_duration_seconds_sum{name="bootstrap_signer_queue"} 0.724080388 +workqueue_work_duration_seconds_sum{name="bootstrap_signer_queue"} 0.562168307 workqueue_work_duration_seconds_count{name="bootstrap_signer_queue"} 2 workqueue_work_duration_seconds_bucket{name="certificate",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="certificate",le="1e-07"} 0 @@ -2244,27 +2365,27 @@ workqueue_work_duration_seconds_bucket{name="daemonset",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="daemonset",le="1e-06"} 0 workqueue_work_duration_seconds_bucket{name="daemonset",le="9.999999999999999e-06"} 0 workqueue_work_duration_seconds_bucket{name="daemonset",le="9.999999999999999e-05"} 0 -workqueue_work_duration_seconds_bucket{name="daemonset",le="0.001"} 8 -workqueue_work_duration_seconds_bucket{name="daemonset",le="0.01"} 15 -workqueue_work_duration_seconds_bucket{name="daemonset",le="0.1"} 23 -workqueue_work_duration_seconds_bucket{name="daemonset",le="1"} 25 -workqueue_work_duration_seconds_bucket{name="daemonset",le="10"} 25 -workqueue_work_duration_seconds_bucket{name="daemonset",le="+Inf"} 25 -workqueue_work_duration_seconds_sum{name="daemonset"} 1.5486732470000002 -workqueue_work_duration_seconds_count{name="daemonset"} 25 +workqueue_work_duration_seconds_bucket{name="daemonset",le="0.001"} 9 +workqueue_work_duration_seconds_bucket{name="daemonset",le="0.01"} 11 
+workqueue_work_duration_seconds_bucket{name="daemonset",le="0.1"} 15 +workqueue_work_duration_seconds_bucket{name="daemonset",le="1"} 17 +workqueue_work_duration_seconds_bucket{name="daemonset",le="10"} 17 +workqueue_work_duration_seconds_bucket{name="daemonset",le="+Inf"} 17 +workqueue_work_duration_seconds_sum{name="daemonset"} 1.9677030530000001 +workqueue_work_duration_seconds_count{name="daemonset"} 17 workqueue_work_duration_seconds_bucket{name="deployment",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="deployment",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="deployment",le="1e-06"} 0 workqueue_work_duration_seconds_bucket{name="deployment",le="9.999999999999999e-06"} 0 workqueue_work_duration_seconds_bucket{name="deployment",le="9.999999999999999e-05"} 0 -workqueue_work_duration_seconds_bucket{name="deployment",le="0.001"} 9 -workqueue_work_duration_seconds_bucket{name="deployment",le="0.01"} 16 -workqueue_work_duration_seconds_bucket{name="deployment",le="0.1"} 19 -workqueue_work_duration_seconds_bucket{name="deployment",le="1"} 21 -workqueue_work_duration_seconds_bucket{name="deployment",le="10"} 21 -workqueue_work_duration_seconds_bucket{name="deployment",le="+Inf"} 21 -workqueue_work_duration_seconds_sum{name="deployment"} 0.381222111 -workqueue_work_duration_seconds_count{name="deployment"} 21 +workqueue_work_duration_seconds_bucket{name="deployment",le="0.001"} 10 +workqueue_work_duration_seconds_bucket{name="deployment",le="0.01"} 11 +workqueue_work_duration_seconds_bucket{name="deployment",le="0.1"} 18 +workqueue_work_duration_seconds_bucket{name="deployment",le="1"} 20 +workqueue_work_duration_seconds_bucket{name="deployment",le="10"} 20 +workqueue_work_duration_seconds_bucket{name="deployment",le="+Inf"} 20 +workqueue_work_duration_seconds_sum{name="deployment"} 1.680529541 +workqueue_work_duration_seconds_count{name="deployment"} 20 workqueue_work_duration_seconds_bucket{name="disruption",le="1e-08"} 0 
workqueue_work_duration_seconds_bucket{name="disruption",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="disruption",le="1e-06"} 0 @@ -2294,42 +2415,42 @@ workqueue_work_duration_seconds_count{name="disruption_recheck"} 0 workqueue_work_duration_seconds_bucket{name="endpoint",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="endpoint",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="endpoint",le="1e-06"} 0 -workqueue_work_duration_seconds_bucket{name="endpoint",le="9.999999999999999e-06"} 1 +workqueue_work_duration_seconds_bucket{name="endpoint",le="9.999999999999999e-06"} 0 workqueue_work_duration_seconds_bucket{name="endpoint",le="9.999999999999999e-05"} 3 workqueue_work_duration_seconds_bucket{name="endpoint",le="0.001"} 3 -workqueue_work_duration_seconds_bucket{name="endpoint",le="0.01"} 6 -workqueue_work_duration_seconds_bucket{name="endpoint",le="0.1"} 7 -workqueue_work_duration_seconds_bucket{name="endpoint",le="1"} 8 -workqueue_work_duration_seconds_bucket{name="endpoint",le="10"} 8 -workqueue_work_duration_seconds_bucket{name="endpoint",le="+Inf"} 8 -workqueue_work_duration_seconds_sum{name="endpoint"} 0.7418064000000001 -workqueue_work_duration_seconds_count{name="endpoint"} 8 +workqueue_work_duration_seconds_bucket{name="endpoint",le="0.01"} 3 +workqueue_work_duration_seconds_bucket{name="endpoint",le="0.1"} 5 +workqueue_work_duration_seconds_bucket{name="endpoint",le="1"} 6 +workqueue_work_duration_seconds_bucket{name="endpoint",le="10"} 6 +workqueue_work_duration_seconds_bucket{name="endpoint",le="+Inf"} 6 +workqueue_work_duration_seconds_sum{name="endpoint"} 0.878932805 +workqueue_work_duration_seconds_count{name="endpoint"} 6 workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="1e-06"} 0 
workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="9.999999999999999e-06"} 0 workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="9.999999999999999e-05"} 1 -workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="0.001"} 5 -workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="0.01"} 7 -workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="0.1"} 9 -workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="1"} 10 -workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="10"} 10 -workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="+Inf"} 10 -workqueue_work_duration_seconds_sum{name="endpoint_slice"} 0.696790555 -workqueue_work_duration_seconds_count{name="endpoint_slice"} 10 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="0.001"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="0.01"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="0.1"} 6 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="1"} 7 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="10"} 7 +workqueue_work_duration_seconds_bucket{name="endpoint_slice",le="+Inf"} 7 +workqueue_work_duration_seconds_sum{name="endpoint_slice"} 0.895027736 +workqueue_work_duration_seconds_count{name="endpoint_slice"} 7 workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1e-06"} 0 workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="9.999999999999999e-06"} 0 -workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="9.999999999999999e-05"} 5 -workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.001"} 6 -workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.01"} 6 
-workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.1"} 6 -workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1"} 6 -workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="10"} 6 -workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="+Inf"} 6 -workqueue_work_duration_seconds_sum{name="endpoint_slice_mirroring"} 0.000374157 -workqueue_work_duration_seconds_count{name="endpoint_slice_mirroring"} 6 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="9.999999999999999e-05"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.001"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.01"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="0.1"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="1"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="10"} 4 +workqueue_work_duration_seconds_bucket{name="endpoint_slice_mirroring",le="+Inf"} 4 +workqueue_work_duration_seconds_sum{name="endpoint_slice_mirroring"} 0.000188079 +workqueue_work_duration_seconds_count{name="endpoint_slice_mirroring"} 4 workqueue_work_duration_seconds_bucket{name="ephemeral_volume",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="ephemeral_volume",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="ephemeral_volume",le="1e-06"} 0 @@ -2347,15 +2468,15 @@ workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="1e-06"} 0 workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="9.999999999999999e-06"} 0 
-workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="9.999999999999999e-05"} 0 -workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="0.001"} 0 -workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="0.01"} 0 -workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="0.1"} 0 -workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="1"} 0 -workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="10"} 0 -workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="+Inf"} 0 -workqueue_work_duration_seconds_sum{name="garbage_collector_attempt_to_delete"} 0 -workqueue_work_duration_seconds_count{name="garbage_collector_attempt_to_delete"} 0 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="9.999999999999999e-05"} 1 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="0.001"} 1 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="0.01"} 1 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="0.1"} 1 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="1"} 1 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="10"} 1 +workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_delete",le="+Inf"} 1 +workqueue_work_duration_seconds_sum{name="garbage_collector_attempt_to_delete"} 6.8602e-05 +workqueue_work_duration_seconds_count{name="garbage_collector_attempt_to_delete"} 1 workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="garbage_collector_attempt_to_orphan",le="1e-06"} 
0 @@ -2372,16 +2493,16 @@ workqueue_work_duration_seconds_count{name="garbage_collector_attempt_to_orphan" workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1e-06"} 0 -workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="9.999999999999999e-06"} 230 -workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="9.999999999999999e-05"} 472 -workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.001"} 473 -workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.01"} 473 -workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.1"} 473 -workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1"} 473 -workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="10"} 473 -workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="+Inf"} 473 -workqueue_work_duration_seconds_sum{name="garbage_collector_graph_changes"} 0.005345119000000003 -workqueue_work_duration_seconds_count{name="garbage_collector_graph_changes"} 473 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="9.999999999999999e-06"} 665 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="9.999999999999999e-05"} 2971 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.001"} 2979 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.01"} 2979 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="0.1"} 2979 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="1"} 2979 
+workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="10"} 2979 +workqueue_work_duration_seconds_bucket{name="garbage_collector_graph_changes",le="+Inf"} 2979 +workqueue_work_duration_seconds_sum{name="garbage_collector_graph_changes"} 0.04551231999999994 +workqueue_work_duration_seconds_count{name="garbage_collector_graph_changes"} 2979 workqueue_work_duration_seconds_bucket{name="horizontalpodautoscaler",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="horizontalpodautoscaler",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="horizontalpodautoscaler",le="1e-06"} 0 @@ -2450,29 +2571,29 @@ workqueue_work_duration_seconds_count{name="node"} 0 workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="1e-06"} 0 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="9.999999999999999e-06"} 0 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="9.999999999999999e-05"} 3 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="0.001"} 3 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="0.01"} 3 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="0.1"} 4 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="1"} 4 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="10"} 4 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="+Inf"} 4 -workqueue_work_duration_seconds_sum{name="node_lifecycle_controller"} 0.014400573 -workqueue_work_duration_seconds_count{name="node_lifecycle_controller"} 4 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="9.999999999999999e-06"} 2 
+workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="9.999999999999999e-05"} 11 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="0.001"} 11 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="0.01"} 11 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="0.1"} 12 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="1"} 12 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="10"} 12 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller",le="+Inf"} 12 +workqueue_work_duration_seconds_sum{name="node_lifecycle_controller"} 0.020806040000000005 +workqueue_work_duration_seconds_count{name="node_lifecycle_controller"} 12 workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1e-06"} 0 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="9.999999999999999e-06"} 0 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="9.999999999999999e-05"} 5 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.001"} 7 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.01"} 7 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.1"} 11 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1"} 11 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="10"} 11 -workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="+Inf"} 11 -workqueue_work_duration_seconds_sum{name="node_lifecycle_controller_pods"} 0.12585382299999998 -workqueue_work_duration_seconds_count{name="node_lifecycle_controller_pods"} 11 
+workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="9.999999999999999e-06"} 1 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="9.999999999999999e-05"} 7 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.001"} 10 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.01"} 10 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="0.1"} 10 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="1"} 10 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="10"} 10 +workqueue_work_duration_seconds_bucket{name="node_lifecycle_controller_pods",le="+Inf"} 10 +workqueue_work_duration_seconds_sum{name="node_lifecycle_controller_pods"} 0.0005371999999999999 +workqueue_work_duration_seconds_count{name="node_lifecycle_controller_pods"} 10 workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="1e-06"} 0 @@ -2484,21 +2605,21 @@ workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="0.1"} 1 workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="1"} 1 workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="10"} 1 workqueue_work_duration_seconds_bucket{name="noexec_taint_node",le="+Inf"} 1 -workqueue_work_duration_seconds_sum{name="noexec_taint_node"} 8.8066e-05 +workqueue_work_duration_seconds_sum{name="noexec_taint_node"} 4.2807e-05 workqueue_work_duration_seconds_count{name="noexec_taint_node"} 1 workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="1e-06"} 0 
-workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="9.999999999999999e-06"} 1 -workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="9.999999999999999e-05"} 18 -workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="0.001"} 18 -workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="0.01"} 18 -workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="0.1"} 18 -workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="1"} 18 -workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="10"} 18 -workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="+Inf"} 18 -workqueue_work_duration_seconds_sum{name="noexec_taint_pod"} 0.000428115 -workqueue_work_duration_seconds_count{name="noexec_taint_pod"} 18 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="9.999999999999999e-05"} 14 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="0.001"} 16 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="0.01"} 16 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="0.1"} 16 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="1"} 16 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="10"} 16 +workqueue_work_duration_seconds_bucket{name="noexec_taint_pod",le="+Inf"} 16 +workqueue_work_duration_seconds_sum{name="noexec_taint_pod"} 0.0005546540000000001 +workqueue_work_duration_seconds_count{name="noexec_taint_pod"} 16 workqueue_work_duration_seconds_bucket{name="orphaned_pods_nodes",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="orphaned_pods_nodes",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="orphaned_pods_nodes",le="1e-06"} 0 @@ -2555,15 +2676,15 @@ workqueue_work_duration_seconds_bucket{name="replicaset",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="replicaset",le="1e-07"} 
0 workqueue_work_duration_seconds_bucket{name="replicaset",le="1e-06"} 0 workqueue_work_duration_seconds_bucket{name="replicaset",le="9.999999999999999e-06"} 0 -workqueue_work_duration_seconds_bucket{name="replicaset",le="9.999999999999999e-05"} 1 -workqueue_work_duration_seconds_bucket{name="replicaset",le="0.001"} 16 -workqueue_work_duration_seconds_bucket{name="replicaset",le="0.01"} 19 -workqueue_work_duration_seconds_bucket{name="replicaset",le="0.1"} 21 -workqueue_work_duration_seconds_bucket{name="replicaset",le="1"} 23 -workqueue_work_duration_seconds_bucket{name="replicaset",le="10"} 23 -workqueue_work_duration_seconds_bucket{name="replicaset",le="+Inf"} 23 -workqueue_work_duration_seconds_sum{name="replicaset"} 1.225059364 -workqueue_work_duration_seconds_count{name="replicaset"} 23 +workqueue_work_duration_seconds_bucket{name="replicaset",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="replicaset",le="0.001"} 11 +workqueue_work_duration_seconds_bucket{name="replicaset",le="0.01"} 11 +workqueue_work_duration_seconds_bucket{name="replicaset",le="0.1"} 16 +workqueue_work_duration_seconds_bucket{name="replicaset",le="1"} 18 +workqueue_work_duration_seconds_bucket{name="replicaset",le="10"} 18 +workqueue_work_duration_seconds_bucket{name="replicaset",le="+Inf"} 18 +workqueue_work_duration_seconds_sum{name="replicaset"} 0.941117998 +workqueue_work_duration_seconds_count{name="replicaset"} 18 workqueue_work_duration_seconds_bucket{name="replicationmanager",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="replicationmanager",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="replicationmanager",le="1e-06"} 0 @@ -2622,12 +2743,12 @@ workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="1e-06"} workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="9.999999999999999e-06"} 0 workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="9.999999999999999e-05"} 0 
workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="0.001"} 0 -workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="0.01"} 1 +workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="0.01"} 2 workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="0.1"} 4 workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="1"} 5 workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="10"} 5 workqueue_work_duration_seconds_bucket{name="root_ca_cert_publisher",le="+Inf"} 5 -workqueue_work_duration_seconds_sum{name="root_ca_cert_publisher"} 0.7635239699999999 +workqueue_work_duration_seconds_sum{name="root_ca_cert_publisher"} 0.715673963 workqueue_work_duration_seconds_count{name="root_ca_cert_publisher"} 5 workqueue_work_duration_seconds_bucket{name="service",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="service",le="1e-07"} 0 @@ -2648,12 +2769,12 @@ workqueue_work_duration_seconds_bucket{name="serviceaccount",le="1e-06"} 0 workqueue_work_duration_seconds_bucket{name="serviceaccount",le="9.999999999999999e-06"} 0 workqueue_work_duration_seconds_bucket{name="serviceaccount",le="9.999999999999999e-05"} 0 workqueue_work_duration_seconds_bucket{name="serviceaccount",le="0.001"} 0 -workqueue_work_duration_seconds_bucket{name="serviceaccount",le="0.01"} 3 +workqueue_work_duration_seconds_bucket{name="serviceaccount",le="0.01"} 1 workqueue_work_duration_seconds_bucket{name="serviceaccount",le="0.1"} 4 workqueue_work_duration_seconds_bucket{name="serviceaccount",le="1"} 5 workqueue_work_duration_seconds_bucket{name="serviceaccount",le="10"} 5 workqueue_work_duration_seconds_bucket{name="serviceaccount",le="+Inf"} 5 -workqueue_work_duration_seconds_sum{name="serviceaccount"} 0.39078707999999995 +workqueue_work_duration_seconds_sum{name="serviceaccount"} 0.24347626800000002 workqueue_work_duration_seconds_count{name="serviceaccount"} 5 
workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_secret",le="1e-07"} 0 @@ -2671,7 +2792,7 @@ workqueue_work_duration_seconds_count{name="serviceaccount_tokens_secret"} 0 workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1e-06"} 0 -workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="9.999999999999999e-06"} 14 +workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="9.999999999999999e-06"} 12 workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="9.999999999999999e-05"} 43 workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="0.001"} 43 workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="0.01"} 43 @@ -2679,7 +2800,7 @@ workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le=" workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="1"} 43 workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="10"} 43 workqueue_work_duration_seconds_bucket{name="serviceaccount_tokens_service",le="+Inf"} 43 -workqueue_work_duration_seconds_sum{name="serviceaccount_tokens_service"} 0.000655238 +workqueue_work_duration_seconds_sum{name="serviceaccount_tokens_service"} 0.000620875 workqueue_work_duration_seconds_count{name="serviceaccount_tokens_service"} 43 workqueue_work_duration_seconds_bucket{name="stale_pod_disruption",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="stale_pod_disruption",le="1e-07"} 0 @@ -2718,7 +2839,7 @@ workqueue_work_duration_seconds_bucket{name="token_cleaner",le="0.1"} 1 workqueue_work_duration_seconds_bucket{name="token_cleaner",le="1"} 1 
workqueue_work_duration_seconds_bucket{name="token_cleaner",le="10"} 1 workqueue_work_duration_seconds_bucket{name="token_cleaner",le="+Inf"} 1 -workqueue_work_duration_seconds_sum{name="token_cleaner"} 3.272e-05 +workqueue_work_duration_seconds_sum{name="token_cleaner"} 5.1242e-05 workqueue_work_duration_seconds_count{name="token_cleaner"} 1 workqueue_work_duration_seconds_bucket{name="ttl_jobs_to_delete",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="ttl_jobs_to_delete",le="1e-07"} 0 @@ -2736,16 +2857,16 @@ workqueue_work_duration_seconds_count{name="ttl_jobs_to_delete"} 0 workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="1e-06"} 0 -workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="9.999999999999999e-06"} 0 -workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="9.999999999999999e-05"} 3 -workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="0.001"} 3 -workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="0.01"} 3 -workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="0.1"} 3 -workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="1"} 4 -workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="10"} 4 -workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="+Inf"} 4 -workqueue_work_duration_seconds_sum{name="ttlcontroller"} 0.27492485799999994 -workqueue_work_duration_seconds_count{name="ttlcontroller"} 4 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="9.999999999999999e-06"} 6 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="9.999999999999999e-05"} 11 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="0.001"} 11 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="0.01"} 11 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="0.1"} 11 
+workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="1"} 12 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="10"} 12 +workqueue_work_duration_seconds_bucket{name="ttlcontroller",le="+Inf"} 12 +workqueue_work_duration_seconds_sum{name="ttlcontroller"} 0.7475187799999997 +workqueue_work_duration_seconds_count{name="ttlcontroller"} 12 workqueue_work_duration_seconds_bucket{name="volume_expand",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="volume_expand",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="volume_expand",le="1e-06"} 0 diff --git a/metricbeat/module/kubernetes/controllermanager/_meta/testdata/docs.plain-expected.json b/metricbeat/module/kubernetes/controllermanager/_meta/testdata/docs.plain-expected.json index 3400aae5c547..22c911cd8325 100644 --- a/metricbeat/module/kubernetes/controllermanager/_meta/testdata/docs.plain-expected.json +++ b/metricbeat/module/kubernetes/controllermanager/_meta/testdata/docs.plain-expected.json @@ -7,10 +7,10 @@ }, "kubernetes": { "controllermanager": { - "name": "serviceaccount", + "name": "noexec_taint_pod", "workqueue": { "adds": { - "count": 5 + "count": 16 }, "depth": { "count": 0 @@ -18,9 +18,6 @@ "longestrunning": { "sec": 0 }, - "retries": { - "count": 0 - }, "unfinished": { "sec": 0 } @@ -44,14 +41,24 @@ }, "kubernetes": { "controllermanager": { - "client": { - "request": { + "name": "serviceaccount", + "workqueue": { + "adds": { "count": 5 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 } - }, - "code": "409", - "host": "172.18.0.2:6443", - "method": "PUT" + } } }, "metricset": { @@ -110,12 +117,73 @@ "controllermanager": { "client": { "request": { - "count": 90 + "duration": { + "us": { + "bucket": { + "+Inf": 1546, + "100000": 1523, + "1000000": 1545, + "15000000": 1546, + "2000000": 1545, + "25000": 1482, + "250000": 1531, + "30000000": 1546, + "4000000": 1546, + "5000": 1414, 
+ "500000": 1543, + "60000000": 1546, + "8000000": 1546 + }, + "count": 1546, + "sum": 16967002.752999995 + } + }, + "size": { + "bytes": { + "bucket": { + "+Inf": 1546, + "1024": 1546, + "1048576": 1546, + "16384": 1546, + "16777216": 1546, + "256": 1546, + "262144": 1546, + "4096": 1546, + "4194304": 1546, + "512": 1546, + "64": 1546, + "65536": 1546 + }, + "count": 1546, + "sum": 0 + } + } + }, + "response": { + "size": { + "bytes": { + "bucket": { + "+Inf": 1546, + "1024": 1226, + "1048576": 1546, + "16384": 1543, + "16777216": 1546, + "256": 88, + "262144": 1546, + "4096": 1233, + "4194304": 1546, + "512": 1223, + "64": 21, + "65536": 1546 + }, + "count": 1546, + "sum": 4165150 + } + } } }, - "code": "201", "host": "172.18.0.2:6443", - "method": "POST" + "verb": "GET" } }, "metricset": { @@ -137,12 +205,73 @@ "controllermanager": { "client": { "request": { - "count": 33 + "duration": { + "us": { + "bucket": { + "+Inf": 84, + "100000": 64, + "1000000": 84, + "15000000": 84, + "2000000": 84, + "25000": 46, + "250000": 66, + "30000000": 84, + "4000000": 84, + "5000": 10, + "500000": 77, + "60000000": 84, + "8000000": 84 + }, + "count": 84, + "sum": 11851414.454 + } + }, + "size": { + "bytes": { + "bucket": { + "+Inf": 84, + "1024": 70, + "1048576": 84, + "16384": 84, + "16777216": 84, + "256": 55, + "262144": 84, + "4096": 84, + "4194304": 84, + "512": 66, + "64": 1, + "65536": 84 + }, + "count": 84, + "sum": 34927 + } + } + }, + "response": { + "size": { + "bytes": { + "bucket": { + "+Inf": 84, + "1024": 51, + "1048576": 84, + "16384": 84, + "16777216": 84, + "256": 38, + "262144": 84, + "4096": 82, + "4194304": 84, + "512": 39, + "64": 0, + "65536": 84 + }, + "count": 84, + "sum": 78889 + } + } } }, - "code": "404", "host": "172.18.0.2:6443", - "method": "GET" + "verb": "POST" } }, "metricset": { @@ -300,75 +429,24 @@ }, "kubernetes": { "controllermanager": { - "client": { - "request": { - "duration": { - "us": { - "bucket": { - "+Inf": 13, - "100000": 9, - 
"1000000": 13, - "15000000": 13, - "2000000": 13, - "25000": 9, - "250000": 9, - "30000000": 13, - "4000000": 13, - "5000": 0, - "500000": 10, - "60000000": 13, - "8000000": 13 - }, - "count": 13, - "sum": 1916945.1439999996 - } - }, - "size": { - "bytes": { - "bucket": { - "+Inf": 13, - "1024": 7, - "1048576": 13, - "16384": 13, - "16777216": 13, - "256": 6, - "262144": 13, - "4096": 13, - "4194304": 13, - "512": 7, - "64": 1, - "65536": 13 - }, - "count": 13, - "sum": 17446 - } - } + "name": "DynamicServingCertificateController", + "workqueue": { + "adds": { + "count": 38 }, - "response": { - "size": { - "bytes": { - "bucket": { - "+Inf": 13, - "1024": 4, - "1048576": 13, - "16384": 13, - "16777216": 13, - "256": 0, - "262144": 13, - "4096": 13, - "4194304": 13, - "512": 0, - "64": 0, - "65536": 13 - }, - "count": 13, - "sum": 30011 - } - } + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 } - }, - "host": "172.18.0.2:6443", - "verb": "PATCH" + } } }, "metricset": { @@ -388,10 +466,10 @@ }, "kubernetes": { "controllermanager": { - "name": "garbage_collector_graph_changes", + "name": "orphaned_pods_nodes", "workqueue": { "adds": { - "count": 473 + "count": 0 }, "depth": { "count": 0 @@ -425,7 +503,7 @@ }, "kubernetes": { "controllermanager": { - "name": "orphaned_pods_nodes", + "name": "replicationmanager", "workqueue": { "adds": { "count": 0 @@ -462,14 +540,24 @@ }, "kubernetes": { "controllermanager": { - "client": { - "request": { - "count": 85 + "name": "endpoint_slice_mirroring", + "workqueue": { + "adds": { + "count": 4 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 } - }, - "code": "200", - "host": "172.18.0.2:6443", - "method": "PUT" + } } }, "metricset": { @@ -489,7 +577,7 @@ }, "kubernetes": { "controllermanager": { - "name": "garbage_collector_attempt_to_delete", + "name": 
"horizontalpodautoscaler", "workqueue": { "adds": { "count": 0 @@ -526,10 +614,10 @@ }, "kubernetes": { "controllermanager": { - "name": "endpoint_slice_mirroring", + "name": "ClusterRoleAggregator", "workqueue": { "adds": { - "count": 6 + "count": 19 }, "depth": { "count": 0 @@ -563,10 +651,10 @@ }, "kubernetes": { "controllermanager": { - "name": "ClusterRoleAggregator", + "name": "resourcequota_priority", "workqueue": { "adds": { - "count": 22 + "count": 0 }, "depth": { "count": 0 @@ -600,10 +688,10 @@ }, "kubernetes": { "controllermanager": { - "name": "endpoint", + "name": "statefulset", "workqueue": { "adds": { - "count": 8 + "count": 0 }, "depth": { "count": 0 @@ -612,7 +700,7 @@ "sec": 0 }, "retries": { - "count": 6 + "count": 0 }, "unfinished": { "sec": 0 @@ -637,10 +725,10 @@ }, "kubernetes": { "controllermanager": { - "name": "replicationmanager", + "name": "endpoint", "workqueue": { "adds": { - "count": 0 + "count": 6 }, "depth": { "count": 0 @@ -649,7 +737,7 @@ "sec": 0 }, "retries": { - "count": 0 + "count": 4 }, "unfinished": { "sec": 0 @@ -674,7 +762,7 @@ }, "kubernetes": { "controllermanager": { - "name": "horizontalpodautoscaler", + "name": "ephemeral_volume", "workqueue": { "adds": { "count": 0 @@ -711,10 +799,10 @@ }, "kubernetes": { "controllermanager": { - "name": "resourcequota_priority", + "name": "replicaset", "workqueue": { "adds": { - "count": 0 + "count": 18 }, "depth": { "count": 0 @@ -748,10 +836,10 @@ }, "kubernetes": { "controllermanager": { - "name": "statefulset", + "name": "endpoint_slice", "workqueue": { "adds": { - "count": 0 + "count": 7 }, "depth": { "count": 0 @@ -760,7 +848,7 @@ "sec": 0 }, "retries": { - "count": 0 + "count": 6 }, "unfinished": { "sec": 0 @@ -785,10 +873,10 @@ }, "kubernetes": { "controllermanager": { - "name": "ephemeral_volume", + "name": "garbage_collector_graph_changes", "workqueue": { "adds": { - "count": 0 + "count": 2979 }, "depth": { "count": 0 @@ -859,10 +947,10 @@ }, "kubernetes": { 
"controllermanager": { - "name": "stale_pod_disruption", + "name": "DynamicCABundle-client-ca-bundle", "workqueue": { "adds": { - "count": 0 + "count": 2 }, "depth": { "count": 0 @@ -896,75 +984,24 @@ }, "kubernetes": { "controllermanager": { - "client": { - "request": { - "duration": { - "us": { - "bucket": { - "+Inf": 208, - "100000": 188, - "1000000": 207, - "15000000": 208, - "2000000": 207, - "25000": 151, - "250000": 196, - "30000000": 208, - "4000000": 208, - "5000": 108, - "500000": 205, - "60000000": 208, - "8000000": 208 - }, - "count": 208, - "sum": 10876092.844000002 - } - }, - "size": { - "bytes": { - "bucket": { - "+Inf": 208, - "1024": 208, - "1048576": 208, - "16384": 208, - "16777216": 208, - "256": 208, - "262144": 208, - "4096": 208, - "4194304": 208, - "512": 208, - "64": 208, - "65536": 208 - }, - "count": 208, - "sum": 0 - } - } + "name": "garbage_collector_attempt_to_delete", + "workqueue": { + "adds": { + "count": 1 }, - "response": { - "size": { - "bytes": { - "bucket": { - "+Inf": 208, - "1024": 169, - "1048576": 208, - "16384": 205, - "16777216": 208, - "256": 85, - "262144": 208, - "4096": 177, - "4194304": 208, - "512": 166, - "64": 21, - "65536": 208 - }, - "count": 208, - "sum": 452159 - } - } + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 } - }, - "host": "172.18.0.2:6443", - "verb": "GET" + } } }, "metricset": { @@ -984,14 +1021,24 @@ }, "kubernetes": { "controllermanager": { - "client": { - "request": { - "count": 220 + "name": "stale_pod_disruption", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 } - }, - "code": "200", - "host": "172.18.0.2:6443", - "method": "GET" + } } }, "metricset": { @@ -1011,10 +1058,10 @@ }, "kubernetes": { "controllermanager": { - "name": "pvprotection", + "name": "daemonset", "workqueue": { 
"adds": { - "count": 0 + "count": 17 }, "depth": { "count": 0 @@ -1048,22 +1095,42 @@ }, "kubernetes": { "controllermanager": { - "name": "namespace", - "workqueue": { - "adds": { - "count": 0 - }, - "depth": { - "count": 0 + "node": { + "collector": { + "count": 1, + "eviction": { + "count": 0 + }, + "health": { + "pct": 100 + }, + "unhealthy": { + "count": 0 + } + } + }, + "process": { + "cpu": { + "sec": 58 }, - "longestrunning": { - "sec": 0 + "fds": { + "max": { + "count": 1048576 + }, + "open": { + "count": 18 + } }, - "retries": { - "count": 0 + "memory": { + "resident": { + "bytes": 109260800 + }, + "virtual": { + "bytes": 1337397248 + } }, - "unfinished": { - "sec": 0 + "started": { + "sec": 1704894767.11 } } } @@ -1085,21 +1152,14 @@ }, "kubernetes": { "controllermanager": { - "name": "noexec_taint_node", - "workqueue": { - "adds": { - "count": 1 - }, - "depth": { - "count": 0 - }, - "longestrunning": { - "sec": 0 - }, - "unfinished": { - "sec": 0 + "client": { + "request": { + "count": 1125 } - } + }, + "code": "200", + "host": "172.18.0.2:6443", + "method": "PUT" } }, "metricset": { @@ -1119,10 +1179,10 @@ }, "kubernetes": { "controllermanager": { - "name": "DynamicCABundle-client-ca-bundle", + "name": "pvprotection", "workqueue": { "adds": { - "count": 1 + "count": 0 }, "depth": { "count": 0 @@ -1156,10 +1216,10 @@ }, "kubernetes": { "controllermanager": { - "name": "node_lifecycle_controller_pods", + "name": "namespace", "workqueue": { "adds": { - "count": 11 + "count": 0 }, "depth": { "count": 0 @@ -1193,21 +1253,14 @@ }, "kubernetes": { "controllermanager": { - "name": "noexec_taint_pod", - "workqueue": { - "adds": { - "count": 18 - }, - "depth": { - "count": 0 - }, - "longestrunning": { - "sec": 0 - }, - "unfinished": { - "sec": 0 + "client": { + "request": { + "count": 10 } - } + }, + "code": "200", + "host": "172.18.0.2:6443", + "method": "PATCH" } }, "metricset": { @@ -1227,10 +1280,10 @@ }, "kubernetes": { "controllermanager": { - "name": 
"job", + "name": "noexec_taint_node", "workqueue": { "adds": { - "count": 0 + "count": 1 }, "depth": { "count": 0 @@ -1238,9 +1291,6 @@ "longestrunning": { "sec": 0 }, - "retries": { - "count": 0 - }, "unfinished": { "sec": 0 } @@ -1264,10 +1314,14 @@ }, "kubernetes": { "controllermanager": { - "leader": { - "is_master": true + "client": { + "request": { + "count": 34 + } }, - "name": "kube-controller-manager" + "code": "404", + "host": "172.18.0.2:6443", + "method": "GET" } }, "metricset": { @@ -1287,21 +1341,14 @@ }, "kubernetes": { "controllermanager": { - "name": "node_lifecycle_controller", - "workqueue": { - "adds": { - "count": 4 - }, - "depth": { - "count": 0 - }, - "longestrunning": { - "sec": 0 - }, - "unfinished": { - "sec": 0 + "client": { + "request": { + "count": 3 } - } + }, + "code": "409", + "host": "172.18.0.2:6443", + "method": "PUT" } }, "metricset": { @@ -1321,24 +1368,14 @@ }, "kubernetes": { "controllermanager": { - "name": "pvcprotection", - "workqueue": { - "adds": { - "count": 0 - }, - "depth": { - "count": 0 - }, - "longestrunning": { - "sec": 0 - }, - "retries": { - "count": 0 - }, - "unfinished": { - "sec": 0 + "client": { + "request": { + "count": 1756 } - } + }, + "code": "200", + "host": "172.18.0.2:6443", + "method": "GET" } }, "metricset": { @@ -1358,10 +1395,10 @@ }, "kubernetes": { "controllermanager": { - "name": "DynamicCABundle-csr-controller", + "name": "job", "workqueue": { "adds": { - "count": 7 + "count": 0 }, "depth": { "count": 0 @@ -1395,10 +1432,10 @@ }, "kubernetes": { "controllermanager": { - "name": "disruption_recheck", + "name": "ttlcontroller", "workqueue": { "adds": { - "count": 0 + "count": 12 }, "depth": { "count": 0 @@ -1432,75 +1469,10 @@ }, "kubernetes": { "controllermanager": { - "client": { - "request": { - "duration": { - "us": { - "bucket": { - "+Inf": 94, - "100000": 75, - "1000000": 94, - "15000000": 94, - "2000000": 94, - "25000": 57, - "250000": 78, - "30000000": 94, - "4000000": 94, - "5000": 14, - 
"500000": 87, - "60000000": 94, - "8000000": 94 - }, - "count": 94, - "sum": 9382712.604000002 - } - }, - "size": { - "bytes": { - "bucket": { - "+Inf": 94, - "1024": 74, - "1048576": 94, - "16384": 94, - "16777216": 94, - "256": 53, - "262144": 94, - "4096": 93, - "4194304": 94, - "512": 70, - "64": 1, - "65536": 94 - }, - "count": 94, - "sum": 53563 - } - } - }, - "response": { - "size": { - "bytes": { - "bucket": { - "+Inf": 94, - "1024": 60, - "1048576": 94, - "16384": 94, - "16777216": 94, - "256": 41, - "262144": 94, - "4096": 90, - "4194304": 94, - "512": 42, - "64": 0, - "65536": 94 - }, - "count": 94, - "sum": 94345 - } - } - } + "leader": { + "is_master": true }, - "host": "172.18.0.2:6443", - "verb": "POST" + "name": "kube-controller-manager" } }, "metricset": { @@ -1520,7 +1492,7 @@ }, "kubernetes": { "controllermanager": { - "name": "volume_expand", + "name": "pvcprotection", "workqueue": { "adds": { "count": 0 @@ -1557,24 +1529,14 @@ }, "kubernetes": { "controllermanager": { - "name": "job_orphan_pod", - "workqueue": { - "adds": { - "count": 0 - }, - "depth": { - "count": 0 - }, - "longestrunning": { - "sec": 0 - }, - "retries": { - "count": 0 - }, - "unfinished": { - "sec": 0 + "client": { + "request": { + "count": 84 } - } + }, + "code": "201", + "host": "172.18.0.2:6443", + "method": "POST" } }, "metricset": { @@ -1594,10 +1556,10 @@ }, "kubernetes": { "controllermanager": { - "name": "daemonset", + "name": "disruption_recheck", "workqueue": { "adds": { - "count": 25 + "count": 0 }, "depth": { "count": 0 @@ -1606,7 +1568,7 @@ "sec": 0 }, "retries": { - "count": 4 + "count": 0 }, "unfinished": { "sec": 0 @@ -1631,10 +1593,10 @@ }, "kubernetes": { "controllermanager": { - "name": "DynamicServingCertificateController", + "name": "volume_expand", "workqueue": { "adds": { - "count": 2 + "count": 0 }, "depth": { "count": 0 @@ -1668,10 +1630,10 @@ }, "kubernetes": { "controllermanager": { - "name": "bootstrap_signer_queue", + "name": "job_orphan_pod", 
"workqueue": { "adds": { - "count": 2 + "count": 0 }, "depth": { "count": 0 @@ -1705,10 +1667,10 @@ }, "kubernetes": { "controllermanager": { - "name": "service", + "name": "deployment", "workqueue": { "adds": { - "count": 0 + "count": 20 }, "depth": { "count": 0 @@ -1717,7 +1679,7 @@ "sec": 0 }, "retries": { - "count": 0 + "count": 9 }, "unfinished": { "sec": 0 @@ -1742,24 +1704,75 @@ }, "kubernetes": { "controllermanager": { - "name": "endpoint_slice", - "workqueue": { - "adds": { - "count": 10 - }, - "depth": { - "count": 0 - }, - "longestrunning": { - "sec": 0 - }, - "retries": { - "count": 11 + "client": { + "request": { + "duration": { + "us": { + "bucket": { + "+Inf": 10, + "100000": 6, + "1000000": 10, + "15000000": 10, + "2000000": 10, + "25000": 4, + "250000": 6, + "30000000": 10, + "4000000": 10, + "5000": 0, + "500000": 6, + "60000000": 10, + "8000000": 10 + }, + "count": 10, + "sum": 3209190.2879999997 + } + }, + "size": { + "bytes": { + "bucket": { + "+Inf": 10, + "1024": 4, + "1048576": 10, + "16384": 10, + "16777216": 10, + "256": 3, + "262144": 10, + "4096": 10, + "4194304": 10, + "512": 4, + "64": 1, + "65536": 10 + }, + "count": 10, + "sum": 18531 + } + } }, - "unfinished": { - "sec": 0 + "response": { + "size": { + "bytes": { + "bucket": { + "+Inf": 10, + "1024": 1, + "1048576": 10, + "16384": 10, + "16777216": 10, + "256": 0, + "262144": 10, + "4096": 10, + "4194304": 10, + "512": 0, + "64": 0, + "65536": 10 + }, + "count": 10, + "sum": 28839 + } + } } - } + }, + "host": "172.18.0.2:6443", + "verb": "PATCH" } }, "metricset": { @@ -1779,10 +1792,10 @@ }, "kubernetes": { "controllermanager": { - "name": "certificate", + "name": "bootstrap_signer_queue", "workqueue": { "adds": { - "count": 0 + "count": 2 }, "depth": { "count": 0 @@ -1816,7 +1829,7 @@ }, "kubernetes": { "controllermanager": { - "name": "disruption", + "name": "service", "workqueue": { "adds": { "count": 0 @@ -1853,34 +1866,7 @@ }, "kubernetes": { "controllermanager": { - "client": 
{ - "request": { - "count": 4 - } - }, - "code": "403", - "host": "172.18.0.2:6443", - "method": "POST" - } - }, - "metricset": { - "name": "controllermanager", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "kubernetes" - } - }, - { - "event": { - "dataset": "kubernetes.controllermanager", - "duration": 115000, - "module": "kubernetes" - }, - "kubernetes": { - "controllermanager": { - "name": "volumes", + "name": "certificate", "workqueue": { "adds": { "count": 0 @@ -1891,6 +1877,9 @@ "longestrunning": { "sec": 0 }, + "retries": { + "count": 0 + }, "unfinished": { "sec": 0 } @@ -1914,10 +1903,10 @@ }, "kubernetes": { "controllermanager": { - "name": "ttlcontroller", + "name": "disruption", "workqueue": { "adds": { - "count": 4 + "count": 0 }, "depth": { "count": 0 @@ -1951,10 +1940,10 @@ }, "kubernetes": { "controllermanager": { - "name": "serviceaccount_tokens_service", + "name": "node_lifecycle_controller", "workqueue": { "adds": { - "count": 43 + "count": 12 }, "depth": { "count": 0 @@ -1962,9 +1951,6 @@ "longestrunning": { "sec": 0 }, - "retries": { - "count": 0 - }, "unfinished": { "sec": 0 } @@ -1988,7 +1974,7 @@ }, "kubernetes": { "controllermanager": { - "name": "resource_quota_controller_resource_changes", + "name": "volumes", "workqueue": { "adds": { "count": 0 @@ -1999,9 +1985,6 @@ "longestrunning": { "sec": 0 }, - "retries": { - "count": 0 - }, "unfinished": { "sec": 0 } @@ -2025,10 +2008,10 @@ }, "kubernetes": { "controllermanager": { - "name": "replicaset", + "name": "DynamicCABundle-request-header", "workqueue": { "adds": { - "count": 23 + "count": 2 }, "depth": { "count": 0 @@ -2062,10 +2045,10 @@ }, "kubernetes": { "controllermanager": { - "name": "garbage_collector_attempt_to_orphan", + "name": "serviceaccount_tokens_service", "workqueue": { "adds": { - "count": 0 + "count": 43 }, "depth": { "count": 0 @@ -2099,10 +2082,10 @@ }, "kubernetes": { "controllermanager": { - "name": "token_cleaner", + "name": 
"resource_quota_controller_resource_changes", "workqueue": { "adds": { - "count": 1 + "count": 0 }, "depth": { "count": 0 @@ -2111,7 +2094,7 @@ "sec": 0 }, "retries": { - "count": 1 + "count": 0 }, "unfinished": { "sec": 0 @@ -2136,10 +2119,10 @@ }, "kubernetes": { "controllermanager": { - "name": "DynamicCABundle-request-header", + "name": "garbage_collector_attempt_to_orphan", "workqueue": { "adds": { - "count": 1 + "count": 0 }, "depth": { "count": 0 @@ -2173,67 +2156,10 @@ }, "kubernetes": { "controllermanager": { - "node": { - "collector": { - "count": 1, - "eviction": { - "count": 0 - }, - "health": { - "pct": 100 - }, - "unhealthy": { - "count": 0 - } - } - }, - "process": { - "cpu": { - "sec": 4 - }, - "fds": { - "max": { - "count": 1048576 - }, - "open": { - "count": 18 - } - }, - "memory": { - "resident": { - "bytes": 99876864 - }, - "virtual": { - "bytes": 807755776 - } - }, - "started": { - "sec": 1698752384.51 - } - } - } - }, - "metricset": { - "name": "controllermanager", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "kubernetes" - } - }, - { - "event": { - "dataset": "kubernetes.controllermanager", - "duration": 115000, - "module": "kubernetes" - }, - "kubernetes": { - "controllermanager": { - "name": "claims", + "name": "token_cleaner", "workqueue": { "adds": { - "count": 0 + "count": 1 }, "depth": { "count": 0 @@ -2241,6 +2167,9 @@ "longestrunning": { "sec": 0 }, + "retries": { + "count": 1 + }, "unfinished": { "sec": 0 } @@ -2264,10 +2193,10 @@ }, "kubernetes": { "controllermanager": { - "name": "deployment", + "name": "claims", "workqueue": { "adds": { - "count": 21 + "count": 0 }, "depth": { "count": 0 @@ -2275,9 +2204,6 @@ "longestrunning": { "sec": 0 }, - "retries": { - "count": 12 - }, "unfinished": { "sec": 0 } @@ -2306,42 +2232,42 @@ "duration": { "us": { "bucket": { - "+Inf": 90, - "100000": 89, - "1000000": 90, - "15000000": 90, - "2000000": 90, - "25000": 86, - "250000": 89, - "30000000": 90, - 
"4000000": 90, - "5000": 11, - "500000": 89, - "60000000": 90, - "8000000": 90 + "+Inf": 1128, + "100000": 1125, + "1000000": 1128, + "15000000": 1128, + "2000000": 1128, + "25000": 1114, + "250000": 1127, + "30000000": 1128, + "4000000": 1128, + "5000": 1, + "500000": 1127, + "60000000": 1128, + "8000000": 1128 }, - "count": 90, - "sum": 1593709.5990000002 + "count": 1128, + "sum": 15549394.524000004 } }, "size": { "bytes": { "bucket": { - "+Inf": 90, - "1024": 51, - "1048576": 90, - "16384": 90, - "16777216": 90, + "+Inf": 1128, + "1024": 1104, + "1048576": 1128, + "16384": 1128, + "16777216": 1128, "256": 0, - "262144": 90, - "4096": 80, - "4194304": 90, - "512": 46, + "262144": 1128, + "4096": 1128, + "4194304": 1128, + "512": 1099, "64": 0, - "65536": 90 + "65536": 1128 }, - "count": 90, - "sum": 175260 + "count": 1128, + "sum": 534678 } } }, @@ -2349,21 +2275,21 @@ "size": { "bytes": { "bucket": { - "+Inf": 90, - "1024": 55, - "1048576": 90, - "16384": 90, - "16777216": 90, - "256": 2, - "262144": 90, - "4096": 78, - "4194304": 90, - "512": 51, + "+Inf": 1128, + "1024": 1103, + "1048576": 1128, + "16384": 1128, + "16777216": 1128, + "256": 1, + "262144": 1128, + "4096": 1123, + "4194304": 1128, + "512": 1101, "64": 0, - "65536": 90 + "65536": 1128 }, - "count": 90, - "sum": 164099 + "count": 1128, + "sum": 578751 } } } @@ -2389,14 +2315,24 @@ }, "kubernetes": { "controllermanager": { - "client": { - "request": { - "count": 13 + "name": "node_lifecycle_controller_pods", + "workqueue": { + "adds": { + "count": 10 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 } - }, - "code": "200", - "host": "172.18.0.2:6443", - "method": "PATCH" + } } }, "metricset": { @@ -2481,5 +2417,42 @@ "address": "127.0.0.1:55555", "type": "kubernetes" } + }, + { + "event": { + "dataset": "kubernetes.controllermanager", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + 
"controllermanager": { + "name": "DynamicCABundle-csr-controller", + "workqueue": { + "adds": { + "count": 8 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + } + }, + "metricset": { + "name": "controllermanager", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } } ] \ No newline at end of file diff --git a/metricbeat/module/kubernetes/controllermanager/controllermanager_test.go b/metricbeat/module/kubernetes/controllermanager/controllermanager_test.go index b3c9f47d8d5f..4ede6653a9d0 100644 --- a/metricbeat/module/kubernetes/controllermanager/controllermanager_test.go +++ b/metricbeat/module/kubernetes/controllermanager/controllermanager_test.go @@ -32,6 +32,7 @@ var files = []string{ "./_meta/test/metrics.1.26", "./_meta/test/metrics.1.27", "./_meta/test/metrics.1.28", + "./_meta/test/metrics.1.29", } func TestEventMapping(t *testing.T) { diff --git a/metricbeat/module/kubernetes/proxy/_meta/test/metrics.1.29 b/metricbeat/module/kubernetes/proxy/_meta/test/metrics.1.29 new file mode 100644 index 000000000000..d3443e93df68 --- /dev/null +++ b/metricbeat/module/kubernetes/proxy/_meta/test/metrics.1.29 @@ -0,0 +1,938 @@ +# HELP aggregator_discovery_aggregation_count_total [ALPHA] Counter of number of times discovery was aggregated +# TYPE aggregator_discovery_aggregation_count_total counter +aggregator_discovery_aggregation_count_total 0 +# HELP apiserver_audit_event_total [ALPHA] Counter of audit events generated and sent to the audit backend. +# TYPE apiserver_audit_event_total counter +apiserver_audit_event_total 0 +# HELP apiserver_audit_requests_rejected_total [ALPHA] Counter of apiserver requests rejected due to an error in audit logging backend. 
+# TYPE apiserver_audit_requests_rejected_total counter +apiserver_audit_requests_rejected_total 0 +# HELP apiserver_client_certificate_expiration_seconds [ALPHA] Distribution of the remaining lifetime on the certificate used to authenticate a request. +# TYPE apiserver_client_certificate_expiration_seconds histogram +apiserver_client_certificate_expiration_seconds_bucket{le="0"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="1800"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="3600"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="7200"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="21600"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="43200"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="86400"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="172800"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="345600"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="604800"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="2.592e+06"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="7.776e+06"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="1.5552e+07"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="3.1104e+07"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="+Inf"} 0 +apiserver_client_certificate_expiration_seconds_sum 0 +apiserver_client_certificate_expiration_seconds_count 0 +# HELP apiserver_envelope_encryption_dek_cache_fill_percent [ALPHA] Percent of the cache slots currently occupied by cached DEKs. +# TYPE apiserver_envelope_encryption_dek_cache_fill_percent gauge +apiserver_envelope_encryption_dek_cache_fill_percent 0 +# HELP apiserver_storage_data_key_generation_duration_seconds [ALPHA] Latencies in seconds of data encryption key(DEK) generation operations. 
+# TYPE apiserver_storage_data_key_generation_duration_seconds histogram +apiserver_storage_data_key_generation_duration_seconds_bucket{le="5e-06"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="1e-05"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="2e-05"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="4e-05"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="8e-05"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00016"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00032"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00064"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00128"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00256"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00512"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.01024"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.02048"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.04096"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="+Inf"} 0 +apiserver_storage_data_key_generation_duration_seconds_sum 0 +apiserver_storage_data_key_generation_duration_seconds_count 0 +# HELP apiserver_storage_data_key_generation_failures_total [ALPHA] Total number of failed data encryption key(DEK) generation operations. +# TYPE apiserver_storage_data_key_generation_failures_total counter +apiserver_storage_data_key_generation_failures_total 0 +# HELP apiserver_storage_envelope_transformation_cache_misses_total [ALPHA] Total number of cache misses while accessing key decryption key(KEK). 
+# TYPE apiserver_storage_envelope_transformation_cache_misses_total counter +apiserver_storage_envelope_transformation_cache_misses_total 0 +# HELP apiserver_webhooks_x509_insecure_sha1_total [ALPHA] Counts the number of requests to servers with insecure SHA1 signatures in their serving certificate OR the number of connection failures due to the insecure SHA1 signatures (either/or, based on the runtime environment) +# TYPE apiserver_webhooks_x509_insecure_sha1_total counter +apiserver_webhooks_x509_insecure_sha1_total 0 +# HELP apiserver_webhooks_x509_missing_san_total [ALPHA] Counts the number of requests to servers missing SAN extension in their serving certificate OR the number of connection failures due to the lack of x509 certificate SAN extension missing (either/or, based on the runtime environment) +# TYPE apiserver_webhooks_x509_missing_san_total counter +apiserver_webhooks_x509_missing_san_total 0 +# HELP cardinality_enforcement_unexpected_categorizations_total [ALPHA] The count of unexpected categorizations during cardinality enforcement. +# TYPE cardinality_enforcement_unexpected_categorizations_total counter +cardinality_enforcement_unexpected_categorizations_total 0 +# HELP disabled_metrics_total [BETA] The count of disabled metrics. +# TYPE disabled_metrics_total counter +disabled_metrics_total 0 +# HELP go_cgo_go_to_c_calls_calls_total Count of calls made from Go to C by the current process. +# TYPE go_cgo_go_to_c_calls_calls_total counter +go_cgo_go_to_c_calls_calls_total 0 +# HELP go_cpu_classes_gc_mark_assist_cpu_seconds_total Estimated total CPU time goroutines spent performing GC tasks to assist the GC and prevent it from falling behind the application. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. 
+# TYPE go_cpu_classes_gc_mark_assist_cpu_seconds_total counter +go_cpu_classes_gc_mark_assist_cpu_seconds_total 0.003119684 +# HELP go_cpu_classes_gc_mark_dedicated_cpu_seconds_total Estimated total CPU time spent performing GC tasks on processors (as defined by GOMAXPROCS) dedicated to those tasks. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_gc_mark_dedicated_cpu_seconds_total counter +go_cpu_classes_gc_mark_dedicated_cpu_seconds_total 0.162652665 +# HELP go_cpu_classes_gc_mark_idle_cpu_seconds_total Estimated total CPU time spent performing GC tasks on spare CPU resources that the Go scheduler could not otherwise find a use for. This should be subtracted from the total GC CPU time to obtain a measure of compulsory GC CPU time. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_gc_mark_idle_cpu_seconds_total counter +go_cpu_classes_gc_mark_idle_cpu_seconds_total 0.016001078 +# HELP go_cpu_classes_gc_pause_cpu_seconds_total Estimated total CPU time spent with the application paused by the GC. Even if only one thread is running during the pause, this is computed as GOMAXPROCS times the pause latency because nothing else can be executing. This is the exact sum of samples in /gc/pause:seconds if each sample is multiplied by GOMAXPROCS at the time it is taken. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_gc_pause_cpu_seconds_total counter +go_cpu_classes_gc_pause_cpu_seconds_total 0.070784224 +# HELP go_cpu_classes_gc_total_cpu_seconds_total Estimated total CPU time spent performing GC tasks. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. 
Sum of all metrics in /cpu/classes/gc. +# TYPE go_cpu_classes_gc_total_cpu_seconds_total counter +go_cpu_classes_gc_total_cpu_seconds_total 0.252557651 +# HELP go_cpu_classes_idle_cpu_seconds_total Estimated total available CPU time not spent executing any Go or Go runtime code. In other words, the part of /cpu/classes/total:cpu-seconds that was unused. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_idle_cpu_seconds_total counter +go_cpu_classes_idle_cpu_seconds_total 37920.812157493 +# HELP go_cpu_classes_scavenge_assist_cpu_seconds_total Estimated total CPU time spent returning unused memory to the underlying platform in response eagerly in response to memory pressure. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_scavenge_assist_cpu_seconds_total counter +go_cpu_classes_scavenge_assist_cpu_seconds_total 2.97e-07 +# HELP go_cpu_classes_scavenge_background_cpu_seconds_total Estimated total CPU time spent performing background tasks to return unused memory to the underlying platform. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_scavenge_background_cpu_seconds_total counter +go_cpu_classes_scavenge_background_cpu_seconds_total 0.000712602 +# HELP go_cpu_classes_scavenge_total_cpu_seconds_total Estimated total CPU time spent performing tasks that return unused memory to the underlying platform. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. Sum of all metrics in /cpu/classes/scavenge. 
+# TYPE go_cpu_classes_scavenge_total_cpu_seconds_total counter +go_cpu_classes_scavenge_total_cpu_seconds_total 0.000712899 +# HELP go_cpu_classes_total_cpu_seconds_total Estimated total available CPU time for user Go code or the Go runtime, as defined by GOMAXPROCS. In other words, GOMAXPROCS integrated over the wall-clock duration this process has been executing for. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. Sum of all metrics in /cpu/classes. +# TYPE go_cpu_classes_total_cpu_seconds_total counter +go_cpu_classes_total_cpu_seconds_total 37922.372021296 +# HELP go_cpu_classes_user_cpu_seconds_total Estimated total CPU time spent running user Go code. This may also include some small amount of time spent in the Go runtime. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_user_cpu_seconds_total counter +go_cpu_classes_user_cpu_seconds_total 1.306593253 +# HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime. +# TYPE go_gc_cycles_automatic_gc_cycles_total counter +go_gc_cycles_automatic_gc_cycles_total 25 +# HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application. +# TYPE go_gc_cycles_forced_gc_cycles_total counter +go_gc_cycles_forced_gc_cycles_total 0 +# HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles. +# TYPE go_gc_cycles_total_gc_cycles_total counter +go_gc_cycles_total_gc_cycles_total 25 +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. 
+# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 5.8243e-05 +go_gc_duration_seconds{quantile="0.25"} 8.9039e-05 +go_gc_duration_seconds{quantile="0.5"} 0.000126473 +go_gc_duration_seconds{quantile="0.75"} 0.000239056 +go_gc_duration_seconds{quantile="1"} 0.000538357 +go_gc_duration_seconds_sum 0.004424014 +go_gc_duration_seconds_count 25 +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. +# TYPE go_gc_gogc_percent gauge +go_gc_gogc_percent 100 +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. +# TYPE go_gc_gomemlimit_bytes gauge +go_gc_gomemlimit_bytes 9.223372036854776e+18 +# HELP go_gc_heap_allocs_by_size_bytes Distribution of heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
+# TYPE go_gc_heap_allocs_by_size_bytes histogram +go_gc_heap_allocs_by_size_bytes_bucket{le="8.999999999999998"} 7807 +go_gc_heap_allocs_by_size_bytes_bucket{le="24.999999999999996"} 66656 +go_gc_heap_allocs_by_size_bytes_bucket{le="64.99999999999999"} 96713 +go_gc_heap_allocs_by_size_bytes_bucket{le="144.99999999999997"} 131856 +go_gc_heap_allocs_by_size_bytes_bucket{le="320.99999999999994"} 137866 +go_gc_heap_allocs_by_size_bytes_bucket{le="704.9999999999999"} 140885 +go_gc_heap_allocs_by_size_bytes_bucket{le="1536.9999999999998"} 142512 +go_gc_heap_allocs_by_size_bytes_bucket{le="3200.9999999999995"} 143108 +go_gc_heap_allocs_by_size_bytes_bucket{le="6528.999999999999"} 143479 +go_gc_heap_allocs_by_size_bytes_bucket{le="13568.999999999998"} 143591 +go_gc_heap_allocs_by_size_bytes_bucket{le="27264.999999999996"} 143685 +go_gc_heap_allocs_by_size_bytes_bucket{le="+Inf"} 143764 +go_gc_heap_allocs_by_size_bytes_sum 2.6836248e+07 +go_gc_heap_allocs_by_size_bytes_count 143764 +# HELP go_gc_heap_allocs_bytes_total Cumulative sum of memory allocated to the heap by the application. +# TYPE go_gc_heap_allocs_bytes_total counter +go_gc_heap_allocs_bytes_total 2.6836248e+07 +# HELP go_gc_heap_allocs_objects_total Cumulative count of heap allocations triggered by the application. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. +# TYPE go_gc_heap_allocs_objects_total counter +go_gc_heap_allocs_objects_total 143764 +# HELP go_gc_heap_frees_by_size_bytes Distribution of freed heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
+# TYPE go_gc_heap_frees_by_size_bytes histogram +go_gc_heap_frees_by_size_bytes_bucket{le="8.999999999999998"} 5771 +go_gc_heap_frees_by_size_bytes_bucket{le="24.999999999999996"} 56577 +go_gc_heap_frees_by_size_bytes_bucket{le="64.99999999999999"} 79311 +go_gc_heap_frees_by_size_bytes_bucket{le="144.99999999999997"} 111096 +go_gc_heap_frees_by_size_bytes_bucket{le="320.99999999999994"} 115045 +go_gc_heap_frees_by_size_bytes_bucket{le="704.9999999999999"} 117274 +go_gc_heap_frees_by_size_bytes_bucket{le="1536.9999999999998"} 118654 +go_gc_heap_frees_by_size_bytes_bucket{le="3200.9999999999995"} 119079 +go_gc_heap_frees_by_size_bytes_bucket{le="6528.999999999999"} 119360 +go_gc_heap_frees_by_size_bytes_bucket{le="13568.999999999998"} 119418 +go_gc_heap_frees_by_size_bytes_bucket{le="27264.999999999996"} 119489 +go_gc_heap_frees_by_size_bytes_bucket{le="+Inf"} 119540 +go_gc_heap_frees_by_size_bytes_sum 1.8566944e+07 +go_gc_heap_frees_by_size_bytes_count 119540 +# HELP go_gc_heap_frees_bytes_total Cumulative sum of heap memory freed by the garbage collector. +# TYPE go_gc_heap_frees_bytes_total counter +go_gc_heap_frees_bytes_total 1.8566944e+07 +# HELP go_gc_heap_frees_objects_total Cumulative count of heap allocations whose storage was freed by the garbage collector. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. +# TYPE go_gc_heap_frees_objects_total counter +go_gc_heap_frees_objects_total 119540 +# HELP go_gc_heap_goal_bytes Heap size target for the end of the GC cycle. +# TYPE go_gc_heap_goal_bytes gauge +go_gc_heap_goal_bytes 1.7035896e+07 +# HELP go_gc_heap_live_bytes Heap memory occupied by live objects that were marked by the previous GC. +# TYPE go_gc_heap_live_bytes gauge +go_gc_heap_live_bytes 8.239392e+06 +# HELP go_gc_heap_objects_objects Number of objects, live or unswept, occupying heap memory. 
+# TYPE go_gc_heap_objects_objects gauge +go_gc_heap_objects_objects 24224 +# HELP go_gc_heap_tiny_allocs_objects_total Count of small allocations that are packed together into blocks. These allocations are counted separately from other allocations because each individual allocation is not tracked by the runtime, only their block. Each block is already accounted for in allocs-by-size and frees-by-size. +# TYPE go_gc_heap_tiny_allocs_objects_total counter +go_gc_heap_tiny_allocs_objects_total 16640 +# HELP go_gc_limiter_last_enabled_gc_cycle GC cycle the last time the GC CPU limiter was enabled. This metric is useful for diagnosing the root cause of an out-of-memory error, because the limiter trades memory for CPU time when the GC's CPU time gets too high. This is most likely to occur with use of SetMemoryLimit. The first GC cycle is cycle 1, so a value of 0 indicates that it was never enabled. +# TYPE go_gc_limiter_last_enabled_gc_cycle gauge +go_gc_limiter_last_enabled_gc_cycle 0 +# HELP go_gc_pauses_seconds Distribution of individual GC-related stop-the-world pause latencies. Bucket counts increase monotonically. +# TYPE go_gc_pauses_seconds histogram +go_gc_pauses_seconds_bucket{le="6.399999999999999e-08"} 0 +go_gc_pauses_seconds_bucket{le="6.399999999999999e-07"} 0 +go_gc_pauses_seconds_bucket{le="7.167999999999999e-06"} 1 +go_gc_pauses_seconds_bucket{le="8.191999999999999e-05"} 34 +go_gc_pauses_seconds_bucket{le="0.0009175039999999999"} 50 +go_gc_pauses_seconds_bucket{le="0.010485759999999998"} 50 +go_gc_pauses_seconds_bucket{le="0.11744051199999998"} 50 +go_gc_pauses_seconds_bucket{le="+Inf"} 50 +go_gc_pauses_seconds_sum 0.0015479040000000001 +go_gc_pauses_seconds_count 50 +# HELP go_gc_scan_globals_bytes The total amount of global variable space that is scannable. +# TYPE go_gc_scan_globals_bytes gauge +go_gc_scan_globals_bytes 440104 +# HELP go_gc_scan_heap_bytes The total amount of heap space that is scannable. 
+# TYPE go_gc_scan_heap_bytes gauge +go_gc_scan_heap_bytes 3.58176e+06 +# HELP go_gc_scan_stack_bytes The number of bytes of stack that were scanned last GC cycle. +# TYPE go_gc_scan_stack_bytes gauge +go_gc_scan_stack_bytes 117008 +# HELP go_gc_scan_total_bytes The total amount space that is scannable. Sum of all metrics in /gc/scan. +# TYPE go_gc_scan_total_bytes gauge +go_gc_scan_total_bytes 4.138872e+06 +# HELP go_gc_stack_starting_size_bytes The stack size of new goroutines. +# TYPE go_gc_stack_starting_size_bytes gauge +go_gc_stack_starting_size_bytes 4096 +# HELP go_godebug_non_default_behavior_execerrdot_events_total The number of non-default behaviors executed by the os/exec package due to a non-default GODEBUG=execerrdot=... setting. +# TYPE go_godebug_non_default_behavior_execerrdot_events_total counter +go_godebug_non_default_behavior_execerrdot_events_total 0 +# HELP go_godebug_non_default_behavior_gocachehash_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocachehash=... setting. +# TYPE go_godebug_non_default_behavior_gocachehash_events_total counter +go_godebug_non_default_behavior_gocachehash_events_total 0 +# HELP go_godebug_non_default_behavior_gocachetest_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocachetest=... setting. +# TYPE go_godebug_non_default_behavior_gocachetest_events_total counter +go_godebug_non_default_behavior_gocachetest_events_total 0 +# HELP go_godebug_non_default_behavior_gocacheverify_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocacheverify=... setting. 
+# TYPE go_godebug_non_default_behavior_gocacheverify_events_total counter +go_godebug_non_default_behavior_gocacheverify_events_total 0 +# HELP go_godebug_non_default_behavior_http2client_events_total The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2client=... setting. +# TYPE go_godebug_non_default_behavior_http2client_events_total counter +go_godebug_non_default_behavior_http2client_events_total 0 +# HELP go_godebug_non_default_behavior_http2server_events_total The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2server=... setting. +# TYPE go_godebug_non_default_behavior_http2server_events_total counter +go_godebug_non_default_behavior_http2server_events_total 0 +# HELP go_godebug_non_default_behavior_installgoroot_events_total The number of non-default behaviors executed by the go/build package due to a non-default GODEBUG=installgoroot=... setting. +# TYPE go_godebug_non_default_behavior_installgoroot_events_total counter +go_godebug_non_default_behavior_installgoroot_events_total 0 +# HELP go_godebug_non_default_behavior_jstmpllitinterp_events_total The number of non-default behaviors executed by the html/template package due to a non-default GODEBUG=jstmpllitinterp=... setting. +# TYPE go_godebug_non_default_behavior_jstmpllitinterp_events_total counter +go_godebug_non_default_behavior_jstmpllitinterp_events_total 0 +# HELP go_godebug_non_default_behavior_multipartmaxheaders_events_total The number of non-default behaviors executed by the mime/multipart package due to a non-default GODEBUG=multipartmaxheaders=... setting. 
+# TYPE go_godebug_non_default_behavior_multipartmaxheaders_events_total counter +go_godebug_non_default_behavior_multipartmaxheaders_events_total 0 +# HELP go_godebug_non_default_behavior_multipartmaxparts_events_total The number of non-default behaviors executed by the mime/multipart package due to a non-default GODEBUG=multipartmaxparts=... setting. +# TYPE go_godebug_non_default_behavior_multipartmaxparts_events_total counter +go_godebug_non_default_behavior_multipartmaxparts_events_total 0 +# HELP go_godebug_non_default_behavior_multipathtcp_events_total The number of non-default behaviors executed by the net package due to a non-default GODEBUG=multipathtcp=... setting. +# TYPE go_godebug_non_default_behavior_multipathtcp_events_total counter +go_godebug_non_default_behavior_multipathtcp_events_total 0 +# HELP go_godebug_non_default_behavior_panicnil_events_total The number of non-default behaviors executed by the runtime package due to a non-default GODEBUG=panicnil=... setting. +# TYPE go_godebug_non_default_behavior_panicnil_events_total counter +go_godebug_non_default_behavior_panicnil_events_total 0 +# HELP go_godebug_non_default_behavior_randautoseed_events_total The number of non-default behaviors executed by the math/rand package due to a non-default GODEBUG=randautoseed=... setting. +# TYPE go_godebug_non_default_behavior_randautoseed_events_total counter +go_godebug_non_default_behavior_randautoseed_events_total 0 +# HELP go_godebug_non_default_behavior_tarinsecurepath_events_total The number of non-default behaviors executed by the archive/tar package due to a non-default GODEBUG=tarinsecurepath=... setting. +# TYPE go_godebug_non_default_behavior_tarinsecurepath_events_total counter +go_godebug_non_default_behavior_tarinsecurepath_events_total 0 +# HELP go_godebug_non_default_behavior_tlsmaxrsasize_events_total The number of non-default behaviors executed by the crypto/tls package due to a non-default GODEBUG=tlsmaxrsasize=... setting. 
+# TYPE go_godebug_non_default_behavior_tlsmaxrsasize_events_total counter +go_godebug_non_default_behavior_tlsmaxrsasize_events_total 0 +# HELP go_godebug_non_default_behavior_x509sha1_events_total The number of non-default behaviors executed by the crypto/x509 package due to a non-default GODEBUG=x509sha1=... setting. +# TYPE go_godebug_non_default_behavior_x509sha1_events_total counter +go_godebug_non_default_behavior_x509sha1_events_total 0 +# HELP go_godebug_non_default_behavior_x509usefallbackroots_events_total The number of non-default behaviors executed by the crypto/x509 package due to a non-default GODEBUG=x509usefallbackroots=... setting. +# TYPE go_godebug_non_default_behavior_x509usefallbackroots_events_total counter +go_godebug_non_default_behavior_x509usefallbackroots_events_total 0 +# HELP go_godebug_non_default_behavior_zipinsecurepath_events_total The number of non-default behaviors executed by the archive/zip package due to a non-default GODEBUG=zipinsecurepath=... setting. +# TYPE go_godebug_non_default_behavior_zipinsecurepath_events_total counter +go_godebug_non_default_behavior_zipinsecurepath_events_total 0 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 51 +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{version="go1.21.5"} 1 +# HELP go_memory_classes_heap_free_bytes Memory that is completely free and eligible to be returned to the underlying system, but has not been. This metric is the runtime's estimate of free address space that is backed by physical memory. +# TYPE go_memory_classes_heap_free_bytes gauge +go_memory_classes_heap_free_bytes 5.3248e+06 +# HELP go_memory_classes_heap_objects_bytes Memory occupied by live objects and dead objects that have not yet been marked free by the garbage collector. 
+# TYPE go_memory_classes_heap_objects_bytes gauge +go_memory_classes_heap_objects_bytes 8.269304e+06 +# HELP go_memory_classes_heap_released_bytes Memory that is completely free and has been returned to the underlying system. This metric is the runtime's estimate of free address space that is still mapped into the process, but is not backed by physical memory. +# TYPE go_memory_classes_heap_released_bytes gauge +go_memory_classes_heap_released_bytes 2.015232e+06 +# HELP go_memory_classes_heap_stacks_bytes Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use. Currently, this represents all stack memory for goroutines. It also includes all OS thread stacks in non-cgo programs. Note that stacks may be allocated differently in the future, and this may change. +# TYPE go_memory_classes_heap_stacks_bytes gauge +go_memory_classes_heap_stacks_bytes 1.835008e+06 +# HELP go_memory_classes_heap_unused_bytes Memory that is reserved for heap objects but is not currently used to hold heap objects. +# TYPE go_memory_classes_heap_unused_bytes gauge +go_memory_classes_heap_unused_bytes 3.527176e+06 +# HELP go_memory_classes_metadata_mcache_free_bytes Memory that is reserved for runtime mcache structures, but not in-use. +# TYPE go_memory_classes_metadata_mcache_free_bytes gauge +go_memory_classes_metadata_mcache_free_bytes 12000 +# HELP go_memory_classes_metadata_mcache_inuse_bytes Memory that is occupied by runtime mcache structures that are currently being used. +# TYPE go_memory_classes_metadata_mcache_inuse_bytes gauge +go_memory_classes_metadata_mcache_inuse_bytes 19200 +# HELP go_memory_classes_metadata_mspan_free_bytes Memory that is reserved for runtime mspan structures, but not in-use. 
+# TYPE go_memory_classes_metadata_mspan_free_bytes gauge +go_memory_classes_metadata_mspan_free_bytes 23016 +# HELP go_memory_classes_metadata_mspan_inuse_bytes Memory that is occupied by runtime mspan structures that are currently being used. +# TYPE go_memory_classes_metadata_mspan_inuse_bytes gauge +go_memory_classes_metadata_mspan_inuse_bytes 254016 +# HELP go_memory_classes_metadata_other_bytes Memory that is reserved for or used to hold runtime metadata. +# TYPE go_memory_classes_metadata_other_bytes gauge +go_memory_classes_metadata_other_bytes 4.555416e+06 +# HELP go_memory_classes_os_stacks_bytes Stack memory allocated by the underlying operating system. In non-cgo programs this metric is currently zero. This may change in the future.In cgo programs this metric includes OS thread stacks allocated directly from the OS. Currently, this only accounts for one stack in c-shared and c-archive build modes, and other sources of stacks from the OS are not measured. This too may change in the future. +# TYPE go_memory_classes_os_stacks_bytes gauge +go_memory_classes_os_stacks_bytes 0 +# HELP go_memory_classes_other_bytes Memory used by execution trace buffers, structures for debugging the runtime, finalizer and profiler specials, and more. +# TYPE go_memory_classes_other_bytes gauge +go_memory_classes_other_bytes 2.958809e+06 +# HELP go_memory_classes_profiling_buckets_bytes Memory that is used by the stack trace hash map used for profiling. +# TYPE go_memory_classes_profiling_buckets_bytes gauge +go_memory_classes_profiling_buckets_bytes 1.456271e+06 +# HELP go_memory_classes_total_bytes All memory mapped by the Go runtime into the current process as read-write. Note that this does not include memory mapped by code called via cgo or via the syscall package. Sum of all metrics in /memory/classes. +# TYPE go_memory_classes_total_bytes gauge +go_memory_classes_total_bytes 3.0250248e+07 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. 
+# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 8.269304e+06 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 2.6836248e+07 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.456271e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 136180 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 4.555416e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 8.269304e+06 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 7.340032e+06 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 1.179648e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 24224 +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes 2.015232e+06 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 1.9136512e+07 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.7048971623253e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. 
+# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 0 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 160404 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 19200 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 31200 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 254016 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 277032 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 1.7035896e+07 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 2.958809e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 1.835008e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 1.835008e+06 +# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 3.0250248e+07 +# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. 
+# TYPE go_sched_gomaxprocs_threads gauge +go_sched_gomaxprocs_threads 16 +# HELP go_sched_goroutines_goroutines Count of live goroutines. +# TYPE go_sched_goroutines_goroutines gauge +go_sched_goroutines_goroutines 51 +# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. Bucket counts increase monotonically. +# TYPE go_sched_latencies_seconds histogram +go_sched_latencies_seconds_bucket{le="6.399999999999999e-08"} 1278 +go_sched_latencies_seconds_bucket{le="6.399999999999999e-07"} 1370 +go_sched_latencies_seconds_bucket{le="7.167999999999999e-06"} 1495 +go_sched_latencies_seconds_bucket{le="8.191999999999999e-05"} 1738 +go_sched_latencies_seconds_bucket{le="0.0009175039999999999"} 1803 +go_sched_latencies_seconds_bucket{le="0.010485759999999998"} 1804 +go_sched_latencies_seconds_bucket{le="0.11744051199999998"} 1804 +go_sched_latencies_seconds_bucket{le="+Inf"} 1804 +go_sched_latencies_seconds_sum 0.008070016 +go_sched_latencies_seconds_count 1804 +# HELP go_sync_mutex_wait_total_seconds_total Approximate cumulative time goroutines have spent blocked on a sync.Mutex or sync.RWMutex. This metric is useful for identifying global changes in lock contention. Collect a mutex or block profile using the runtime/pprof package for more detailed contention data. +# TYPE go_sync_mutex_wait_total_seconds_total counter +go_sync_mutex_wait_total_seconds_total 0.000555248 +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +go_threads 18 +# HELP hidden_metrics_total [BETA] The count of hidden metrics. 
+# TYPE hidden_metrics_total counter +hidden_metrics_total 0 +# HELP kubeproxy_network_programming_duration_seconds [ALPHA] In Cluster Network Programming Latency in seconds +# TYPE kubeproxy_network_programming_duration_seconds histogram +kubeproxy_network_programming_duration_seconds_bucket{le="0"} 0 +kubeproxy_network_programming_duration_seconds_bucket{le="0.25"} 0 +kubeproxy_network_programming_duration_seconds_bucket{le="0.5"} 1 +kubeproxy_network_programming_duration_seconds_bucket{le="1"} 1 +kubeproxy_network_programming_duration_seconds_bucket{le="2"} 1 +kubeproxy_network_programming_duration_seconds_bucket{le="3"} 1 +kubeproxy_network_programming_duration_seconds_bucket{le="4"} 1 +kubeproxy_network_programming_duration_seconds_bucket{le="5"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="6"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="7"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="8"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="9"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="10"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="11"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="12"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="13"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="14"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="15"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="16"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="17"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="18"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="19"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="20"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="21"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="22"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="23"} 2 
+kubeproxy_network_programming_duration_seconds_bucket{le="24"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="25"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="26"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="27"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="28"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="29"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="30"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="31"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="32"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="33"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="34"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="35"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="36"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="37"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="38"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="39"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="40"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="41"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="42"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="43"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="44"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="45"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="46"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="47"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="48"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="49"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="50"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="51"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="52"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="53"} 2 
+kubeproxy_network_programming_duration_seconds_bucket{le="54"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="55"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="56"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="57"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="58"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="59"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="60"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="65"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="70"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="75"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="80"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="85"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="90"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="95"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="100"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="105"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="110"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="115"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="120"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="150"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="180"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="210"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="240"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="270"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="300"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="+Inf"} 2 +kubeproxy_network_programming_duration_seconds_sum 4.633884953 +kubeproxy_network_programming_duration_seconds_count 2 +# HELP kubeproxy_sync_full_proxy_rules_duration_seconds [ALPHA] SyncProxyRules latency in seconds for full resyncs +# TYPE 
kubeproxy_sync_full_proxy_rules_duration_seconds histogram +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.001"} 0 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.002"} 0 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.004"} 0 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.008"} 0 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.016"} 0 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.032"} 0 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.064"} 0 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.128"} 2 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.256"} 2 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.512"} 2 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="1.024"} 2 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="2.048"} 2 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="4.096"} 2 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="8.192"} 2 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="16.384"} 2 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="+Inf"} 2 +kubeproxy_sync_full_proxy_rules_duration_seconds_sum 0.24495503000000002 +kubeproxy_sync_full_proxy_rules_duration_seconds_count 2 +# HELP kubeproxy_sync_partial_proxy_rules_duration_seconds [ALPHA] SyncProxyRules latency in seconds for partial resyncs +# TYPE kubeproxy_sync_partial_proxy_rules_duration_seconds histogram +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.001"} 0 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.002"} 0 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.004"} 0 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.008"} 0 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.016"} 0 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.032"} 1 
+kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.064"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.128"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.256"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.512"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="1.024"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="2.048"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="4.096"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="8.192"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="16.384"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="+Inf"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_sum 0.052629146 +kubeproxy_sync_partial_proxy_rules_duration_seconds_count 2 +# HELP kubeproxy_sync_proxy_rules_duration_seconds [ALPHA] SyncProxyRules latency in seconds +# TYPE kubeproxy_sync_proxy_rules_duration_seconds histogram +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.001"} 0 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.002"} 0 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.004"} 0 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.008"} 0 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.016"} 0 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.032"} 1 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.064"} 2 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.128"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.256"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.512"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="1.024"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="2.048"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="4.096"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="8.192"} 4 
+kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="16.384"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="+Inf"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_sum 0.297575151 +kubeproxy_sync_proxy_rules_duration_seconds_count 4 +# HELP kubeproxy_sync_proxy_rules_endpoint_changes_pending [ALPHA] Pending proxy rules Endpoint changes +# TYPE kubeproxy_sync_proxy_rules_endpoint_changes_pending gauge +kubeproxy_sync_proxy_rules_endpoint_changes_pending 0 +# HELP kubeproxy_sync_proxy_rules_endpoint_changes_total [ALPHA] Cumulative proxy rules Endpoint changes +# TYPE kubeproxy_sync_proxy_rules_endpoint_changes_total counter +kubeproxy_sync_proxy_rules_endpoint_changes_total 8 +# HELP kubeproxy_sync_proxy_rules_iptables_last [ALPHA] Number of iptables rules written by kube-proxy in last sync +# TYPE kubeproxy_sync_proxy_rules_iptables_last gauge +kubeproxy_sync_proxy_rules_iptables_last{table="filter"} 4 +kubeproxy_sync_proxy_rules_iptables_last{table="nat"} 30 +# HELP kubeproxy_sync_proxy_rules_iptables_partial_restore_failures_total [ALPHA] Cumulative proxy iptables partial restore failures +# TYPE kubeproxy_sync_proxy_rules_iptables_partial_restore_failures_total counter +kubeproxy_sync_proxy_rules_iptables_partial_restore_failures_total 0 +# HELP kubeproxy_sync_proxy_rules_iptables_restore_failures_total [ALPHA] Cumulative proxy iptables restore failures +# TYPE kubeproxy_sync_proxy_rules_iptables_restore_failures_total counter +kubeproxy_sync_proxy_rules_iptables_restore_failures_total 0 +# HELP kubeproxy_sync_proxy_rules_iptables_total [ALPHA] Total number of iptables rules owned by kube-proxy +# TYPE kubeproxy_sync_proxy_rules_iptables_total gauge +kubeproxy_sync_proxy_rules_iptables_total{table="filter"} 4 +kubeproxy_sync_proxy_rules_iptables_total{table="nat"} 34 +# HELP kubeproxy_sync_proxy_rules_last_queued_timestamp_seconds [ALPHA] The last time a sync of proxy rules was queued +# TYPE kubeproxy_sync_proxy_rules_last_queued_timestamp_seconds 
gauge +kubeproxy_sync_proxy_rules_last_queued_timestamp_seconds 1.704894799314416e+09 +# HELP kubeproxy_sync_proxy_rules_last_timestamp_seconds [ALPHA] The last time proxy rules were successfully synced +# TYPE kubeproxy_sync_proxy_rules_last_timestamp_seconds gauge +kubeproxy_sync_proxy_rules_last_timestamp_seconds 1.704894799332805e+09 +# HELP kubeproxy_sync_proxy_rules_no_local_endpoints_total [ALPHA] Number of services with a Local traffic policy and no endpoints +# TYPE kubeproxy_sync_proxy_rules_no_local_endpoints_total gauge +kubeproxy_sync_proxy_rules_no_local_endpoints_total{traffic_policy="external"} 0 +kubeproxy_sync_proxy_rules_no_local_endpoints_total{traffic_policy="internal"} 0 +# HELP kubeproxy_sync_proxy_rules_service_changes_pending [ALPHA] Pending proxy rules Service changes +# TYPE kubeproxy_sync_proxy_rules_service_changes_pending gauge +kubeproxy_sync_proxy_rules_service_changes_pending 0 +# HELP kubeproxy_sync_proxy_rules_service_changes_total [ALPHA] Cumulative proxy rules Service changes +# TYPE kubeproxy_sync_proxy_rules_service_changes_total counter +kubeproxy_sync_proxy_rules_service_changes_total 12 +# HELP kubernetes_build_info [ALPHA] A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running. +# TYPE kubernetes_build_info gauge +kubernetes_build_info{build_date="2023-12-14T19:18:17Z",compiler="gc",git_commit="3f7a50f38688eb332e2a1b013678c6435d539ae6",git_tree_state="clean",git_version="v1.29.0",go_version="go1.21.5",major="1",minor="29",platform="linux/amd64"} 1 +# HELP kubernetes_feature_enabled [BETA] This metric records the data about the stage and enablement of a k8s feature. 
+# TYPE kubernetes_feature_enabled gauge +kubernetes_feature_enabled{name="APIListChunking",stage=""} 1 +kubernetes_feature_enabled{name="APIPriorityAndFairness",stage=""} 1 +kubernetes_feature_enabled{name="APIResponseCompression",stage="BETA"} 1 +kubernetes_feature_enabled{name="APISelfSubjectReview",stage=""} 1 +kubernetes_feature_enabled{name="APIServerIdentity",stage="BETA"} 1 +kubernetes_feature_enabled{name="APIServerTracing",stage="BETA"} 1 +kubernetes_feature_enabled{name="AdmissionWebhookMatchConditions",stage="BETA"} 1 +kubernetes_feature_enabled{name="AggregatedDiscoveryEndpoint",stage="BETA"} 1 +kubernetes_feature_enabled{name="AllAlpha",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="AllBeta",stage="BETA"} 0 +kubernetes_feature_enabled{name="AllowServiceLBStatusOnNonLB",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="AnyVolumeDataSource",stage="BETA"} 1 +kubernetes_feature_enabled{name="AppArmor",stage="BETA"} 1 +kubernetes_feature_enabled{name="CPUManager",stage=""} 1 +kubernetes_feature_enabled{name="CPUManagerPolicyAlphaOptions",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CPUManagerPolicyBetaOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="CPUManagerPolicyOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="CRDValidationRatcheting",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CSIMigrationAzureFile",stage=""} 1 +kubernetes_feature_enabled{name="CSIMigrationPortworx",stage="BETA"} 0 +kubernetes_feature_enabled{name="CSIMigrationRBD",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="CSINodeExpandSecret",stage=""} 1 +kubernetes_feature_enabled{name="CSIVolumeHealth",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CloudControllerManagerWebhook",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CloudDualStackNodeIPs",stage="BETA"} 1 +kubernetes_feature_enabled{name="ClusterTrustBundle",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ClusterTrustBundleProjection",stage="ALPHA"} 0 
+kubernetes_feature_enabled{name="ComponentSLIs",stage="BETA"} 1 +kubernetes_feature_enabled{name="ConsistentHTTPGetHandlers",stage=""} 1 +kubernetes_feature_enabled{name="ConsistentListFromCache",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ContainerCheckpoint",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ContextualLogging",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CronJobsScheduledAnnotation",stage="BETA"} 1 +kubernetes_feature_enabled{name="CrossNamespaceVolumeDataSource",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CustomCPUCFSQuotaPeriod",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CustomResourceValidationExpressions",stage=""} 1 +kubernetes_feature_enabled{name="DefaultHostNetworkHostPortsInPodTemplates",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="DevicePluginCDIDevices",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableCloudProviders",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableKubeletCloudCredentialProviders",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableNodeKubeProxyVersion",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="DynamicResourceAllocation",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="EfficientWatchResumption",stage=""} 1 +kubernetes_feature_enabled{name="ElasticIndexedJob",stage="BETA"} 1 +kubernetes_feature_enabled{name="EventedPLEG",stage="BETA"} 0 +kubernetes_feature_enabled{name="ExecProbeTimeout",stage=""} 1 +kubernetes_feature_enabled{name="ExpandedDNSConfig",stage=""} 1 +kubernetes_feature_enabled{name="ExperimentalHostUserNamespaceDefaulting",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="GracefulNodeShutdown",stage="BETA"} 1 +kubernetes_feature_enabled{name="GracefulNodeShutdownBasedOnPodPriority",stage="BETA"} 1 +kubernetes_feature_enabled{name="HPAContainerMetrics",stage="BETA"} 1 +kubernetes_feature_enabled{name="HPAScaleToZero",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="HonorPVReclaimPolicy",stage="ALPHA"} 0 
+kubernetes_feature_enabled{name="IPTablesOwnershipCleanup",stage=""} 1 +kubernetes_feature_enabled{name="ImageMaximumGCAge",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InPlacePodVerticalScaling",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginAWSUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginAzureDiskUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginAzureFileUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginGCEUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginOpenStackUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginPortworxUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginRBDUnregister",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="InTreePluginvSphereUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="JobBackoffLimitPerIndex",stage="BETA"} 1 +kubernetes_feature_enabled{name="JobPodFailurePolicy",stage="BETA"} 1 +kubernetes_feature_enabled{name="JobPodReplacementPolicy",stage="BETA"} 1 +kubernetes_feature_enabled{name="JobReadyPods",stage=""} 1 +kubernetes_feature_enabled{name="KMSv1",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="KMSv2",stage=""} 1 +kubernetes_feature_enabled{name="KMSv2KDF",stage=""} 1 +kubernetes_feature_enabled{name="KubeProxyDrainingTerminatingNodes",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="KubeletCgroupDriverFromCRI",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="KubeletInUserNamespace",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="KubeletPodResources",stage=""} 1 +kubernetes_feature_enabled{name="KubeletPodResourcesDynamicResources",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="KubeletPodResourcesGet",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="KubeletPodResourcesGetAllocatable",stage=""} 1 +kubernetes_feature_enabled{name="KubeletSeparateDiskGC",stage="ALPHA"} 0 
+kubernetes_feature_enabled{name="KubeletTracing",stage="BETA"} 1 +kubernetes_feature_enabled{name="LegacyServiceAccountTokenCleanUp",stage="BETA"} 1 +kubernetes_feature_enabled{name="LegacyServiceAccountTokenTracking",stage=""} 1 +kubernetes_feature_enabled{name="LoadBalancerIPMode",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="LocalStorageCapacityIsolationFSQuotaMonitoring",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="LogarithmicScaleDown",stage="BETA"} 1 +kubernetes_feature_enabled{name="LoggingAlphaOptions",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="LoggingBetaOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="MatchLabelKeysInPodAffinity",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="MatchLabelKeysInPodTopologySpread",stage="BETA"} 1 +kubernetes_feature_enabled{name="MaxUnavailableStatefulSet",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="MemoryManager",stage="BETA"} 1 +kubernetes_feature_enabled{name="MemoryQoS",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="MinDomainsInPodTopologySpread",stage="BETA"} 1 +kubernetes_feature_enabled{name="MinimizeIPTablesRestore",stage=""} 1 +kubernetes_feature_enabled{name="MultiCIDRServiceAllocator",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="NFTablesProxyMode",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="NewVolumeManagerReconstruction",stage="BETA"} 1 +kubernetes_feature_enabled{name="NodeInclusionPolicyInPodTopologySpread",stage="BETA"} 1 +kubernetes_feature_enabled{name="NodeLogQuery",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="NodeOutOfServiceVolumeDetach",stage=""} 1 +kubernetes_feature_enabled{name="NodeSwap",stage="BETA"} 0 +kubernetes_feature_enabled{name="OpenAPIEnums",stage="BETA"} 1 +kubernetes_feature_enabled{name="PDBUnhealthyPodEvictionPolicy",stage="BETA"} 1 +kubernetes_feature_enabled{name="PersistentVolumeLastPhaseTransitionTime",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodAndContainerStatsFromCRI",stage="ALPHA"} 0 
+kubernetes_feature_enabled{name="PodDeletionCost",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodDisruptionConditions",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodHostIPs",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodIndexLabel",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodLifecycleSleepAction",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PodReadyToStartContainersCondition",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodSchedulingReadiness",stage="BETA"} 1 +kubernetes_feature_enabled{name="ProcMountType",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ProxyTerminatingEndpoints",stage=""} 1 +kubernetes_feature_enabled{name="QOSReserved",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ReadWriteOncePod",stage=""} 1 +kubernetes_feature_enabled{name="RecoverVolumeExpansionFailure",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="RemainingItemCount",stage=""} 1 +kubernetes_feature_enabled{name="RemoveSelfLink",stage=""} 1 +kubernetes_feature_enabled{name="RotateKubeletServerCertificate",stage="BETA"} 1 +kubernetes_feature_enabled{name="RuntimeClassInImageCriApi",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="SELinuxMountReadWriteOncePod",stage="BETA"} 1 +kubernetes_feature_enabled{name="SchedulerQueueingHints",stage="BETA"} 0 +kubernetes_feature_enabled{name="SecurityContextDeny",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="SeparateTaintEvictionController",stage="BETA"} 1 +kubernetes_feature_enabled{name="ServerSideApply",stage=""} 1 +kubernetes_feature_enabled{name="ServerSideFieldValidation",stage=""} 1 +kubernetes_feature_enabled{name="ServiceAccountTokenJTI",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenNodeBinding",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenNodeBindingValidation",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenPodNodeInfo",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceNodePortStaticSubrange",stage=""} 1 
+kubernetes_feature_enabled{name="SidecarContainers",stage="BETA"} 1 +kubernetes_feature_enabled{name="SizeMemoryBackedVolumes",stage="BETA"} 1 +kubernetes_feature_enabled{name="SkipReadOnlyValidationGCE",stage="DEPRECATED"} 1 +kubernetes_feature_enabled{name="StableLoadBalancerNodeSet",stage="BETA"} 1 +kubernetes_feature_enabled{name="StatefulSetAutoDeletePVC",stage="BETA"} 1 +kubernetes_feature_enabled{name="StatefulSetStartOrdinal",stage="BETA"} 1 +kubernetes_feature_enabled{name="StorageVersionAPI",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="StorageVersionHash",stage="BETA"} 1 +kubernetes_feature_enabled{name="StructuredAuthenticationConfiguration",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="StructuredAuthorizationConfiguration",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="TopologyAwareHints",stage="BETA"} 1 +kubernetes_feature_enabled{name="TopologyManagerPolicyAlphaOptions",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="TopologyManagerPolicyBetaOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="TopologyManagerPolicyOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="TranslateStreamCloseWebsocketRequests",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="UnauthenticatedHTTP2DOSMitigation",stage="BETA"} 1 +kubernetes_feature_enabled{name="UnknownVersionInteroperabilityProxy",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="UserNamespacesPodSecurityStandards",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="UserNamespacesSupport",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ValidatingAdmissionPolicy",stage="BETA"} 0 +kubernetes_feature_enabled{name="VolumeAttributesClass",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="VolumeCapacityPriority",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="WatchBookmark",stage=""} 1 +kubernetes_feature_enabled{name="WatchList",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="WinDSR",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="WinOverlay",stage="BETA"} 1 
+kubernetes_feature_enabled{name="WindowsHostNetwork",stage="ALPHA"} 1 +kubernetes_feature_enabled{name="ZeroLimitedNominalConcurrencyShares",stage="BETA"} 0 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 1.06 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 1.048576e+06 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 11 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 6.4688128e+07 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.70489479177e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 1.315946496e+09 +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. +# TYPE process_virtual_memory_max_bytes gauge +process_virtual_memory_max_bytes 1.8446744073709552e+19 +# HELP registered_metrics_total [BETA] The count of registered metrics broken by stability level and deprecation version. +# TYPE registered_metrics_total counter +registered_metrics_total{deprecated_version="",stability_level="ALPHA"} 88 +registered_metrics_total{deprecated_version="",stability_level="BETA"} 4 +registered_metrics_total{deprecated_version="",stability_level="STABLE"} 5 +# HELP rest_client_exec_plugin_certificate_rotation_age [ALPHA] Histogram of the number of seconds the last auth exec plugin client certificate lived before being rotated. If auth exec plugin client certificates are unused, histogram will contain no data. 
+# TYPE rest_client_exec_plugin_certificate_rotation_age histogram +rest_client_exec_plugin_certificate_rotation_age_bucket{le="600"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="1800"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="3600"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="14400"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="86400"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="604800"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="2.592e+06"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="7.776e+06"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="1.5552e+07"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="3.1104e+07"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="1.24416e+08"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="+Inf"} 0 +rest_client_exec_plugin_certificate_rotation_age_sum 0 +rest_client_exec_plugin_certificate_rotation_age_count 0 +# HELP rest_client_exec_plugin_ttl_seconds [ALPHA] Gauge of the shortest TTL (time-to-live) of the client certificate(s) managed by the auth exec plugin. The value is in seconds until certificate expiry (negative if already expired). If auth exec plugins are unused or manage no TLS certificates, the value will be +INF. +# TYPE rest_client_exec_plugin_ttl_seconds gauge +rest_client_exec_plugin_ttl_seconds +Inf +# HELP rest_client_rate_limiter_duration_seconds [ALPHA] Client side rate limiter latency in seconds. Broken down by verb, and host. 
+# TYPE rest_client_rate_limiter_duration_seconds histogram +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="0.005"} 4 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="0.025"} 4 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="0.1"} 4 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="0.25"} 4 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="0.5"} 4 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="1"} 4 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="2"} 4 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="4"} 4 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="8"} 4 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="15"} 4 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="30"} 4 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="60"} 4 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="+Inf"} 4 +rest_client_rate_limiter_duration_seconds_sum{host="kind-control-plane:6443",verb="GET"} 1.4624e-05 +rest_client_rate_limiter_duration_seconds_count{host="kind-control-plane:6443",verb="GET"} 4 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="0.005"} 1 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="0.025"} 1 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="0.1"} 1 
+rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="0.25"} 1 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="0.5"} 1 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="1"} 1 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="2"} 1 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="4"} 1 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="8"} 1 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="15"} 1 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="30"} 1 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="60"} 1 +rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="+Inf"} 1 +rest_client_rate_limiter_duration_seconds_sum{host="kind-control-plane:6443",verb="POST"} 2.725e-06 +rest_client_rate_limiter_duration_seconds_count{host="kind-control-plane:6443",verb="POST"} 1 +# HELP rest_client_request_duration_seconds [ALPHA] Request latency in seconds. Broken down by verb, and host. 
+# TYPE rest_client_request_duration_seconds histogram +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="0.005"} 3 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="0.025"} 4 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="0.1"} 4 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="0.25"} 4 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="0.5"} 4 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="1"} 4 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="2"} 4 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="4"} 4 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="8"} 4 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="15"} 4 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="30"} 4 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="60"} 4 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="+Inf"} 4 +rest_client_request_duration_seconds_sum{host="kind-control-plane:6443",verb="GET"} 0.025656880000000003 +rest_client_request_duration_seconds_count{host="kind-control-plane:6443",verb="GET"} 4 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="0.005"} 0 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="0.025"} 1 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="0.1"} 1 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="0.25"} 1 
+rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="0.5"} 1 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="1"} 1 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="2"} 1 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="4"} 1 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="8"} 1 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="15"} 1 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="30"} 1 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="60"} 1 +rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="+Inf"} 1 +rest_client_request_duration_seconds_sum{host="kind-control-plane:6443",verb="POST"} 0.018671026 +rest_client_request_duration_seconds_count{host="kind-control-plane:6443",verb="POST"} 1 +# HELP rest_client_request_size_bytes [ALPHA] Request size in bytes. Broken down by verb and host. 
+# TYPE rest_client_request_size_bytes histogram +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="64"} 4 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="256"} 4 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="512"} 4 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="1024"} 4 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="4096"} 4 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="16384"} 4 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="65536"} 4 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="262144"} 4 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="1.048576e+06"} 4 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="4.194304e+06"} 4 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="1.6777216e+07"} 4 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="+Inf"} 4 +rest_client_request_size_bytes_sum{host="kind-control-plane:6443",verb="GET"} 0 +rest_client_request_size_bytes_count{host="kind-control-plane:6443",verb="GET"} 4 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="64"} 0 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="256"} 0 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="512"} 1 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="1024"} 1 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="4096"} 1 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="16384"} 1 
+rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="65536"} 1 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="262144"} 1 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="1.048576e+06"} 1 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="4.194304e+06"} 1 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="1.6777216e+07"} 1 +rest_client_request_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="+Inf"} 1 +rest_client_request_size_bytes_sum{host="kind-control-plane:6443",verb="POST"} 259 +rest_client_request_size_bytes_count{host="kind-control-plane:6443",verb="POST"} 1 +# HELP rest_client_requests_total [ALPHA] Number of HTTP requests, partitioned by status code, method, and host. +# TYPE rest_client_requests_total counter +rest_client_requests_total{code="200",host="kind-control-plane:6443",method="GET"} 22 +rest_client_requests_total{code="201",host="kind-control-plane:6443",method="POST"} 1 +# HELP rest_client_response_size_bytes [ALPHA] Response size in bytes. Broken down by verb and host. 
+# TYPE rest_client_response_size_bytes histogram +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="64"} 0 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="256"} 0 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="512"} 0 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="1024"} 0 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="4096"} 4 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="16384"} 4 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="65536"} 4 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="262144"} 4 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="1.048576e+06"} 4 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="4.194304e+06"} 4 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="1.6777216e+07"} 4 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="+Inf"} 4 +rest_client_response_size_bytes_sum{host="kind-control-plane:6443",verb="GET"} 10814 +rest_client_response_size_bytes_count{host="kind-control-plane:6443",verb="GET"} 4 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="64"} 0 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="256"} 0 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="512"} 1 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="1024"} 1 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="4096"} 1 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="16384"} 1 
+rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="65536"} 1 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="262144"} 1 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="1.048576e+06"} 1 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="4.194304e+06"} 1 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="1.6777216e+07"} 1 +rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="+Inf"} 1 +rest_client_response_size_bytes_sum{host="kind-control-plane:6443",verb="POST"} 503 +rest_client_response_size_bytes_count{host="kind-control-plane:6443",verb="POST"} 1 +# HELP rest_client_transport_cache_entries [ALPHA] Number of transport entries in the internal cache. +# TYPE rest_client_transport_cache_entries gauge +rest_client_transport_cache_entries 0 +# HELP rest_client_transport_create_calls_total [ALPHA] Number of calls to get a new transport, partitioned by the result of the operation hit: obtained from the cache, miss: created and added to the cache, uncacheable: created and not cached +# TYPE rest_client_transport_create_calls_total counter +rest_client_transport_create_calls_total{result="miss"} 1 diff --git a/metricbeat/module/kubernetes/proxy/_meta/test/metrics.1.29.expected b/metricbeat/module/kubernetes/proxy/_meta/test/metrics.1.29.expected new file mode 100644 index 000000000000..95143ab0e558 --- /dev/null +++ b/metricbeat/module/kubernetes/proxy/_meta/test/metrics.1.29.expected @@ -0,0 +1,380 @@ +[ + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "duration": { + "us": { + "bucket": { + "+Inf": 4, + "100000": 4, + "1000000": 4, + "15000000": 4, + "2000000": 4, + "25000": 4, + "250000": 4, + "30000000": 4, + "4000000": 4, + "5000": 3, + "500000": 4, + "60000000": 4, + "8000000": 4 + }, + "count": 4, + 
"sum": 25656.880000000005 + } + }, + "size": { + "bytes": { + "bucket": { + "+Inf": 4, + "1024": 4, + "1048576": 4, + "16384": 4, + "16777216": 4, + "256": 4, + "262144": 4, + "4096": 4, + "4194304": 4, + "512": 4, + "64": 4, + "65536": 4 + }, + "count": 4, + "sum": 0 + } + } + }, + "response": { + "size": { + "bytes": { + "bucket": { + "+Inf": 4, + "1024": 0, + "1048576": 4, + "16384": 4, + "16777216": 4, + "256": 0, + "262144": 4, + "4096": 4, + "4194304": 4, + "512": 0, + "64": 0, + "65536": 4 + }, + "count": 4, + "sum": 10814 + } + } + } + }, + "host": "kind-control-plane:6443", + "verb": "GET" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "count": 22 + } + }, + "code": "200", + "host": "kind-control-plane:6443", + "method": "GET" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "process": { + "cpu": { + "sec": 1 + }, + "fds": { + "max": { + "count": 1048576 + }, + "open": { + "count": 11 + } + }, + "memory": { + "resident": { + "bytes": 64688128 + }, + "virtual": { + "bytes": 1315946496 + } + }, + "started": { + "sec": 1704894791.77 + } + }, + "sync": { + "networkprogramming": { + "duration": { + "us": { + "bucket": { + "+Inf": 2, + "0": 0, + "1000000": 1, + "10000000": 2, + "100000000": 2, + "105000000": 2, + "11000000": 2, + "110000000": 2, + "115000000": 2, + "12000000": 2, + "120000000": 2, + "13000000": 2, + "14000000": 2, + "15000000": 2, + "150000000": 2, + "16000000": 2, + "17000000": 2, + "18000000": 2, + "180000000": 2, + "19000000": 2, + "2000000": 1, + "20000000": 2, + 
"21000000": 2, + "210000000": 2, + "22000000": 2, + "23000000": 2, + "24000000": 2, + "240000000": 2, + "250000": 0, + "25000000": 2, + "26000000": 2, + "27000000": 2, + "270000000": 2, + "28000000": 2, + "29000000": 2, + "3000000": 1, + "30000000": 2, + "300000000": 2, + "31000000": 2, + "32000000": 2, + "33000000": 2, + "34000000": 2, + "35000000": 2, + "36000000": 2, + "37000000": 2, + "38000000": 2, + "39000000": 2, + "4000000": 1, + "40000000": 2, + "41000000": 2, + "42000000": 2, + "43000000": 2, + "44000000": 2, + "45000000": 2, + "46000000": 2, + "47000000": 2, + "48000000": 2, + "49000000": 2, + "500000": 1, + "5000000": 2, + "50000000": 2, + "51000000": 2, + "52000000": 2, + "53000000": 2, + "54000000": 2, + "55000000": 2, + "56000000": 2, + "57000000": 2, + "58000000": 2, + "59000000": 2, + "6000000": 2, + "60000000": 2, + "65000000": 2, + "7000000": 2, + "70000000": 2, + "75000000": 2, + "8000000": 2, + "80000000": 2, + "85000000": 2, + "9000000": 2, + "90000000": 2, + "95000000": 2 + }, + "count": 2, + "sum": 4633884.953 + } + } + }, + "rules": { + "duration": { + "us": { + "bucket": { + "+Inf": 4, + "1000": 0, + "1024000": 4, + "128000": 4, + "16000": 0, + "16384000": 4, + "2000": 0, + "2048000": 4, + "256000": 4, + "32000": 1, + "4000": 0, + "4096000": 4, + "512000": 4, + "64000": 2, + "8000": 0, + "8192000": 4 + }, + "count": 4, + "sum": 297575.15099999995 + } + } + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "count": 1 + } + }, + "code": "201", + "host": "kind-control-plane:6443", + "method": "POST" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": 
false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "duration": { + "us": { + "bucket": { + "+Inf": 1, + "100000": 1, + "1000000": 1, + "15000000": 1, + "2000000": 1, + "25000": 1, + "250000": 1, + "30000000": 1, + "4000000": 1, + "5000": 0, + "500000": 1, + "60000000": 1, + "8000000": 1 + }, + "count": 1, + "sum": 18671.026 + } + }, + "size": { + "bytes": { + "bucket": { + "+Inf": 1, + "1024": 1, + "1048576": 1, + "16384": 1, + "16777216": 1, + "256": 0, + "262144": 1, + "4096": 1, + "4194304": 1, + "512": 1, + "64": 0, + "65536": 1 + }, + "count": 1, + "sum": 259 + } + } + }, + "response": { + "size": { + "bytes": { + "bucket": { + "+Inf": 1, + "1024": 1, + "1048576": 1, + "16384": 1, + "16777216": 1, + "256": 0, + "262144": 1, + "4096": 1, + "4194304": 1, + "512": 1, + "64": 0, + "65536": 1 + }, + "count": 1, + "sum": 503 + } + } + } + }, + "host": "kind-control-plane:6443", + "verb": "POST" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + } +] \ No newline at end of file diff --git a/metricbeat/module/kubernetes/proxy/_meta/testdata/docs.plain b/metricbeat/module/kubernetes/proxy/_meta/testdata/docs.plain index fe95d1bee4df..d3443e93df68 100644 --- a/metricbeat/module/kubernetes/proxy/_meta/testdata/docs.plain +++ b/metricbeat/module/kubernetes/proxy/_meta/testdata/docs.plain @@ -60,6 +60,9 @@ apiserver_webhooks_x509_insecure_sha1_total 0 # HELP apiserver_webhooks_x509_missing_san_total [ALPHA] Counts the number of requests to servers missing SAN extension in their serving certificate OR the number of connection failures due to the lack of x509 certificate SAN extension missing (either/or, based on the runtime environment) # TYPE apiserver_webhooks_x509_missing_san_total counter apiserver_webhooks_x509_missing_san_total 0 +# HELP 
cardinality_enforcement_unexpected_categorizations_total [ALPHA] The count of unexpected categorizations during cardinality enforcement. +# TYPE cardinality_enforcement_unexpected_categorizations_total counter +cardinality_enforcement_unexpected_categorizations_total 0 # HELP disabled_metrics_total [BETA] The count of disabled metrics. # TYPE disabled_metrics_total counter disabled_metrics_total 0 @@ -68,267 +71,342 @@ disabled_metrics_total 0 go_cgo_go_to_c_calls_calls_total 0 # HELP go_cpu_classes_gc_mark_assist_cpu_seconds_total Estimated total CPU time goroutines spent performing GC tasks to assist the GC and prevent it from falling behind the application. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_gc_mark_assist_cpu_seconds_total counter -go_cpu_classes_gc_mark_assist_cpu_seconds_total 0.009223987 -# HELP go_cpu_classes_gc_mark_dedicated_cpu_seconds_total Estimated total CPU time spent performing GC tasks on processors (as defined by GOMAXPROCS) dedicated to those tasks. This includes time spent with the world stopped due to the GC. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +go_cpu_classes_gc_mark_assist_cpu_seconds_total 0.003119684 +# HELP go_cpu_classes_gc_mark_dedicated_cpu_seconds_total Estimated total CPU time spent performing GC tasks on processors (as defined by GOMAXPROCS) dedicated to those tasks. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. 
# TYPE go_cpu_classes_gc_mark_dedicated_cpu_seconds_total counter -go_cpu_classes_gc_mark_dedicated_cpu_seconds_total 0.028446872 +go_cpu_classes_gc_mark_dedicated_cpu_seconds_total 0.162652665 # HELP go_cpu_classes_gc_mark_idle_cpu_seconds_total Estimated total CPU time spent performing GC tasks on spare CPU resources that the Go scheduler could not otherwise find a use for. This should be subtracted from the total GC CPU time to obtain a measure of compulsory GC CPU time. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_gc_mark_idle_cpu_seconds_total counter -go_cpu_classes_gc_mark_idle_cpu_seconds_total 0.039283135 +go_cpu_classes_gc_mark_idle_cpu_seconds_total 0.016001078 # HELP go_cpu_classes_gc_pause_cpu_seconds_total Estimated total CPU time spent with the application paused by the GC. Even if only one thread is running during the pause, this is computed as GOMAXPROCS times the pause latency because nothing else can be executing. This is the exact sum of samples in /gc/pause:seconds if each sample is multiplied by GOMAXPROCS at the time it is taken. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_gc_pause_cpu_seconds_total counter -go_cpu_classes_gc_pause_cpu_seconds_total 0.009795848 +go_cpu_classes_gc_pause_cpu_seconds_total 0.070784224 # HELP go_cpu_classes_gc_total_cpu_seconds_total Estimated total CPU time spent performing GC tasks. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. Sum of all metrics in /cpu/classes/gc. 
# TYPE go_cpu_classes_gc_total_cpu_seconds_total counter -go_cpu_classes_gc_total_cpu_seconds_total 0.086749842 +go_cpu_classes_gc_total_cpu_seconds_total 0.252557651 # HELP go_cpu_classes_idle_cpu_seconds_total Estimated total available CPU time not spent executing any Go or Go runtime code. In other words, the part of /cpu/classes/total:cpu-seconds that was unused. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_idle_cpu_seconds_total counter -go_cpu_classes_idle_cpu_seconds_total 1159.619118254 +go_cpu_classes_idle_cpu_seconds_total 37920.812157493 # HELP go_cpu_classes_scavenge_assist_cpu_seconds_total Estimated total CPU time spent returning unused memory to the underlying platform in response eagerly in response to memory pressure. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_scavenge_assist_cpu_seconds_total counter -go_cpu_classes_scavenge_assist_cpu_seconds_total 4.18e-07 +go_cpu_classes_scavenge_assist_cpu_seconds_total 2.97e-07 # HELP go_cpu_classes_scavenge_background_cpu_seconds_total Estimated total CPU time spent performing background tasks to return unused memory to the underlying platform. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_scavenge_background_cpu_seconds_total counter -go_cpu_classes_scavenge_background_cpu_seconds_total 3.59e-07 +go_cpu_classes_scavenge_background_cpu_seconds_total 0.000712602 # HELP go_cpu_classes_scavenge_total_cpu_seconds_total Estimated total CPU time spent performing tasks that return unused memory to the underlying platform. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. 
Sum of all metrics in /cpu/classes/scavenge. # TYPE go_cpu_classes_scavenge_total_cpu_seconds_total counter -go_cpu_classes_scavenge_total_cpu_seconds_total 7.77e-07 +go_cpu_classes_scavenge_total_cpu_seconds_total 0.000712899 # HELP go_cpu_classes_total_cpu_seconds_total Estimated total available CPU time for user Go code or the Go runtime, as defined by GOMAXPROCS. In other words, GOMAXPROCS integrated over the wall-clock duration this process has been executing for. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. Sum of all metrics in /cpu/classes. # TYPE go_cpu_classes_total_cpu_seconds_total counter -go_cpu_classes_total_cpu_seconds_total 1325.824567008 +go_cpu_classes_total_cpu_seconds_total 37922.372021296 # HELP go_cpu_classes_user_cpu_seconds_total Estimated total CPU time spent running user Go code. This may also include some small amount of time spent in the Go runtime. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_user_cpu_seconds_total counter -go_cpu_classes_user_cpu_seconds_total 166.118698135 +go_cpu_classes_user_cpu_seconds_total 1.306593253 # HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime. # TYPE go_gc_cycles_automatic_gc_cycles_total counter -go_gc_cycles_automatic_gc_cycles_total 7 +go_gc_cycles_automatic_gc_cycles_total 25 # HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application. # TYPE go_gc_cycles_forced_gc_cycles_total counter go_gc_cycles_forced_gc_cycles_total 0 # HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles. 
# TYPE go_gc_cycles_total_gc_cycles_total counter -go_gc_cycles_total_gc_cycles_total 7 +go_gc_cycles_total_gc_cycles_total 25 # HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. # TYPE go_gc_duration_seconds summary -go_gc_duration_seconds{quantile="0"} 5.0358e-05 -go_gc_duration_seconds{quantile="0.25"} 6.9666e-05 -go_gc_duration_seconds{quantile="0.5"} 9.9134e-05 -go_gc_duration_seconds{quantile="0.75"} 0.000318162 -go_gc_duration_seconds{quantile="1"} 0.000330769 -go_gc_duration_seconds_sum 0.001224481 -go_gc_duration_seconds_count 7 -# HELP go_gc_heap_allocs_by_size_bytes Distribution of heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. +go_gc_duration_seconds{quantile="0"} 5.8243e-05 +go_gc_duration_seconds{quantile="0.25"} 8.9039e-05 +go_gc_duration_seconds{quantile="0.5"} 0.000126473 +go_gc_duration_seconds{quantile="0.75"} 0.000239056 +go_gc_duration_seconds{quantile="1"} 0.000538357 +go_gc_duration_seconds_sum 0.004424014 +go_gc_duration_seconds_count 25 +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. +# TYPE go_gc_gogc_percent gauge +go_gc_gogc_percent 100 +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. +# TYPE go_gc_gomemlimit_bytes gauge +go_gc_gomemlimit_bytes 9.223372036854776e+18 +# HELP go_gc_heap_allocs_by_size_bytes Distribution of heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
# TYPE go_gc_heap_allocs_by_size_bytes histogram -go_gc_heap_allocs_by_size_bytes_bucket{le="8.999999999999998"} 5631 -go_gc_heap_allocs_by_size_bytes_bucket{le="24.999999999999996"} 57806 -go_gc_heap_allocs_by_size_bytes_bucket{le="64.99999999999999"} 82833 -go_gc_heap_allocs_by_size_bytes_bucket{le="144.99999999999997"} 118416 -go_gc_heap_allocs_by_size_bytes_bucket{le="320.99999999999994"} 123528 -go_gc_heap_allocs_by_size_bytes_bucket{le="704.9999999999999"} 125774 -go_gc_heap_allocs_by_size_bytes_bucket{le="1536.9999999999998"} 126868 -go_gc_heap_allocs_by_size_bytes_bucket{le="3200.9999999999995"} 127279 -go_gc_heap_allocs_by_size_bytes_bucket{le="6528.999999999999"} 127613 -go_gc_heap_allocs_by_size_bytes_bucket{le="13568.999999999998"} 127739 -go_gc_heap_allocs_by_size_bytes_bucket{le="27264.999999999996"} 127811 -go_gc_heap_allocs_by_size_bytes_bucket{le="+Inf"} 127881 -go_gc_heap_allocs_by_size_bytes_sum 2.2809216e+07 -go_gc_heap_allocs_by_size_bytes_count 127881 +go_gc_heap_allocs_by_size_bytes_bucket{le="8.999999999999998"} 7807 +go_gc_heap_allocs_by_size_bytes_bucket{le="24.999999999999996"} 66656 +go_gc_heap_allocs_by_size_bytes_bucket{le="64.99999999999999"} 96713 +go_gc_heap_allocs_by_size_bytes_bucket{le="144.99999999999997"} 131856 +go_gc_heap_allocs_by_size_bytes_bucket{le="320.99999999999994"} 137866 +go_gc_heap_allocs_by_size_bytes_bucket{le="704.9999999999999"} 140885 +go_gc_heap_allocs_by_size_bytes_bucket{le="1536.9999999999998"} 142512 +go_gc_heap_allocs_by_size_bytes_bucket{le="3200.9999999999995"} 143108 +go_gc_heap_allocs_by_size_bytes_bucket{le="6528.999999999999"} 143479 +go_gc_heap_allocs_by_size_bytes_bucket{le="13568.999999999998"} 143591 +go_gc_heap_allocs_by_size_bytes_bucket{le="27264.999999999996"} 143685 +go_gc_heap_allocs_by_size_bytes_bucket{le="+Inf"} 143764 +go_gc_heap_allocs_by_size_bytes_sum 2.6836248e+07 +go_gc_heap_allocs_by_size_bytes_count 143764 # HELP go_gc_heap_allocs_bytes_total Cumulative sum of memory allocated 
to the heap by the application. # TYPE go_gc_heap_allocs_bytes_total counter -go_gc_heap_allocs_bytes_total 2.2809216e+07 +go_gc_heap_allocs_bytes_total 2.6836248e+07 # HELP go_gc_heap_allocs_objects_total Cumulative count of heap allocations triggered by the application. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. # TYPE go_gc_heap_allocs_objects_total counter -go_gc_heap_allocs_objects_total 127881 -# HELP go_gc_heap_frees_by_size_bytes Distribution of freed heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. +go_gc_heap_allocs_objects_total 143764 +# HELP go_gc_heap_frees_by_size_bytes Distribution of freed heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. # TYPE go_gc_heap_frees_by_size_bytes histogram -go_gc_heap_frees_by_size_bytes_bucket{le="8.999999999999998"} 3603 -go_gc_heap_frees_by_size_bytes_bucket{le="24.999999999999996"} 36632 -go_gc_heap_frees_by_size_bytes_bucket{le="64.99999999999999"} 51661 -go_gc_heap_frees_by_size_bytes_bucket{le="144.99999999999997"} 77090 -go_gc_heap_frees_by_size_bytes_bucket{le="320.99999999999994"} 79910 -go_gc_heap_frees_by_size_bytes_bucket{le="704.9999999999999"} 81341 -go_gc_heap_frees_by_size_bytes_bucket{le="1536.9999999999998"} 82147 -go_gc_heap_frees_by_size_bytes_bucket{le="3200.9999999999995"} 82377 -go_gc_heap_frees_by_size_bytes_bucket{le="6528.999999999999"} 82574 -go_gc_heap_frees_by_size_bytes_bucket{le="13568.999999999998"} 82643 -go_gc_heap_frees_by_size_bytes_bucket{le="27264.999999999996"} 82678 -go_gc_heap_frees_by_size_bytes_bucket{le="+Inf"} 82715 -go_gc_heap_frees_by_size_bytes_sum 1.1375464e+07 -go_gc_heap_frees_by_size_bytes_count 82715 +go_gc_heap_frees_by_size_bytes_bucket{le="8.999999999999998"} 5771 
+go_gc_heap_frees_by_size_bytes_bucket{le="24.999999999999996"} 56577 +go_gc_heap_frees_by_size_bytes_bucket{le="64.99999999999999"} 79311 +go_gc_heap_frees_by_size_bytes_bucket{le="144.99999999999997"} 111096 +go_gc_heap_frees_by_size_bytes_bucket{le="320.99999999999994"} 115045 +go_gc_heap_frees_by_size_bytes_bucket{le="704.9999999999999"} 117274 +go_gc_heap_frees_by_size_bytes_bucket{le="1536.9999999999998"} 118654 +go_gc_heap_frees_by_size_bytes_bucket{le="3200.9999999999995"} 119079 +go_gc_heap_frees_by_size_bytes_bucket{le="6528.999999999999"} 119360 +go_gc_heap_frees_by_size_bytes_bucket{le="13568.999999999998"} 119418 +go_gc_heap_frees_by_size_bytes_bucket{le="27264.999999999996"} 119489 +go_gc_heap_frees_by_size_bytes_bucket{le="+Inf"} 119540 +go_gc_heap_frees_by_size_bytes_sum 1.8566944e+07 +go_gc_heap_frees_by_size_bytes_count 119540 # HELP go_gc_heap_frees_bytes_total Cumulative sum of heap memory freed by the garbage collector. # TYPE go_gc_heap_frees_bytes_total counter -go_gc_heap_frees_bytes_total 1.1375464e+07 +go_gc_heap_frees_bytes_total 1.8566944e+07 # HELP go_gc_heap_frees_objects_total Cumulative count of heap allocations whose storage was freed by the garbage collector. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. # TYPE go_gc_heap_frees_objects_total counter -go_gc_heap_frees_objects_total 82715 +go_gc_heap_frees_objects_total 119540 # HELP go_gc_heap_goal_bytes Heap size target for the end of the GC cycle. # TYPE go_gc_heap_goal_bytes gauge -go_gc_heap_goal_bytes 1.8213232e+07 +go_gc_heap_goal_bytes 1.7035896e+07 +# HELP go_gc_heap_live_bytes Heap memory occupied by live objects that were marked by the previous GC. +# TYPE go_gc_heap_live_bytes gauge +go_gc_heap_live_bytes 8.239392e+06 # HELP go_gc_heap_objects_objects Number of objects, live or unswept, occupying heap memory. 
# TYPE go_gc_heap_objects_objects gauge -go_gc_heap_objects_objects 45166 +go_gc_heap_objects_objects 24224 # HELP go_gc_heap_tiny_allocs_objects_total Count of small allocations that are packed together into blocks. These allocations are counted separately from other allocations because each individual allocation is not tracked by the runtime, only their block. Each block is already accounted for in allocs-by-size and frees-by-size. # TYPE go_gc_heap_tiny_allocs_objects_total counter -go_gc_heap_tiny_allocs_objects_total 14805 +go_gc_heap_tiny_allocs_objects_total 16640 # HELP go_gc_limiter_last_enabled_gc_cycle GC cycle the last time the GC CPU limiter was enabled. This metric is useful for diagnosing the root cause of an out-of-memory error, because the limiter trades memory for CPU time when the GC's CPU time gets too high. This is most likely to occur with use of SetMemoryLimit. The first GC cycle is cycle 1, so a value of 0 indicates that it was never enabled. # TYPE go_gc_limiter_last_enabled_gc_cycle gauge go_gc_limiter_last_enabled_gc_cycle 0 -# HELP go_gc_pauses_seconds Distribution individual GC-related stop-the-world pause latencies. +# HELP go_gc_pauses_seconds Distribution of individual GC-related stop-the-world pause latencies. Bucket counts increase monotonically. 
# TYPE go_gc_pauses_seconds histogram go_gc_pauses_seconds_bucket{le="6.399999999999999e-08"} 0 go_gc_pauses_seconds_bucket{le="6.399999999999999e-07"} 0 -go_gc_pauses_seconds_bucket{le="7.167999999999999e-06"} 2 -go_gc_pauses_seconds_bucket{le="8.191999999999999e-05"} 9 -go_gc_pauses_seconds_bucket{le="0.0009175039999999999"} 14 -go_gc_pauses_seconds_bucket{le="0.010485759999999998"} 14 -go_gc_pauses_seconds_bucket{le="0.11744051199999998"} 14 -go_gc_pauses_seconds_bucket{le="+Inf"} 14 -go_gc_pauses_seconds_sum 0.000461056 -go_gc_pauses_seconds_count 14 +go_gc_pauses_seconds_bucket{le="7.167999999999999e-06"} 1 +go_gc_pauses_seconds_bucket{le="8.191999999999999e-05"} 34 +go_gc_pauses_seconds_bucket{le="0.0009175039999999999"} 50 +go_gc_pauses_seconds_bucket{le="0.010485759999999998"} 50 +go_gc_pauses_seconds_bucket{le="0.11744051199999998"} 50 +go_gc_pauses_seconds_bucket{le="+Inf"} 50 +go_gc_pauses_seconds_sum 0.0015479040000000001 +go_gc_pauses_seconds_count 50 +# HELP go_gc_scan_globals_bytes The total amount of global variable space that is scannable. +# TYPE go_gc_scan_globals_bytes gauge +go_gc_scan_globals_bytes 440104 +# HELP go_gc_scan_heap_bytes The total amount of heap space that is scannable. +# TYPE go_gc_scan_heap_bytes gauge +go_gc_scan_heap_bytes 3.58176e+06 +# HELP go_gc_scan_stack_bytes The number of bytes of stack that were scanned last GC cycle. +# TYPE go_gc_scan_stack_bytes gauge +go_gc_scan_stack_bytes 117008 +# HELP go_gc_scan_total_bytes The total amount space that is scannable. Sum of all metrics in /gc/scan. +# TYPE go_gc_scan_total_bytes gauge +go_gc_scan_total_bytes 4.138872e+06 # HELP go_gc_stack_starting_size_bytes The stack size of new goroutines. # TYPE go_gc_stack_starting_size_bytes gauge go_gc_stack_starting_size_bytes 4096 +# HELP go_godebug_non_default_behavior_execerrdot_events_total The number of non-default behaviors executed by the os/exec package due to a non-default GODEBUG=execerrdot=... setting. 
+# TYPE go_godebug_non_default_behavior_execerrdot_events_total counter +go_godebug_non_default_behavior_execerrdot_events_total 0 +# HELP go_godebug_non_default_behavior_gocachehash_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocachehash=... setting. +# TYPE go_godebug_non_default_behavior_gocachehash_events_total counter +go_godebug_non_default_behavior_gocachehash_events_total 0 +# HELP go_godebug_non_default_behavior_gocachetest_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocachetest=... setting. +# TYPE go_godebug_non_default_behavior_gocachetest_events_total counter +go_godebug_non_default_behavior_gocachetest_events_total 0 +# HELP go_godebug_non_default_behavior_gocacheverify_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocacheverify=... setting. +# TYPE go_godebug_non_default_behavior_gocacheverify_events_total counter +go_godebug_non_default_behavior_gocacheverify_events_total 0 +# HELP go_godebug_non_default_behavior_http2client_events_total The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2client=... setting. +# TYPE go_godebug_non_default_behavior_http2client_events_total counter +go_godebug_non_default_behavior_http2client_events_total 0 +# HELP go_godebug_non_default_behavior_http2server_events_total The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2server=... setting. +# TYPE go_godebug_non_default_behavior_http2server_events_total counter +go_godebug_non_default_behavior_http2server_events_total 0 +# HELP go_godebug_non_default_behavior_installgoroot_events_total The number of non-default behaviors executed by the go/build package due to a non-default GODEBUG=installgoroot=... setting. 
+# TYPE go_godebug_non_default_behavior_installgoroot_events_total counter +go_godebug_non_default_behavior_installgoroot_events_total 0 +# HELP go_godebug_non_default_behavior_jstmpllitinterp_events_total The number of non-default behaviors executed by the html/template package due to a non-default GODEBUG=jstmpllitinterp=... setting. +# TYPE go_godebug_non_default_behavior_jstmpllitinterp_events_total counter +go_godebug_non_default_behavior_jstmpllitinterp_events_total 0 +# HELP go_godebug_non_default_behavior_multipartmaxheaders_events_total The number of non-default behaviors executed by the mime/multipart package due to a non-default GODEBUG=multipartmaxheaders=... setting. +# TYPE go_godebug_non_default_behavior_multipartmaxheaders_events_total counter +go_godebug_non_default_behavior_multipartmaxheaders_events_total 0 +# HELP go_godebug_non_default_behavior_multipartmaxparts_events_total The number of non-default behaviors executed by the mime/multipart package due to a non-default GODEBUG=multipartmaxparts=... setting. +# TYPE go_godebug_non_default_behavior_multipartmaxparts_events_total counter +go_godebug_non_default_behavior_multipartmaxparts_events_total 0 +# HELP go_godebug_non_default_behavior_multipathtcp_events_total The number of non-default behaviors executed by the net package due to a non-default GODEBUG=multipathtcp=... setting. +# TYPE go_godebug_non_default_behavior_multipathtcp_events_total counter +go_godebug_non_default_behavior_multipathtcp_events_total 0 +# HELP go_godebug_non_default_behavior_panicnil_events_total The number of non-default behaviors executed by the runtime package due to a non-default GODEBUG=panicnil=... setting. +# TYPE go_godebug_non_default_behavior_panicnil_events_total counter +go_godebug_non_default_behavior_panicnil_events_total 0 +# HELP go_godebug_non_default_behavior_randautoseed_events_total The number of non-default behaviors executed by the math/rand package due to a non-default GODEBUG=randautoseed=... 
setting. +# TYPE go_godebug_non_default_behavior_randautoseed_events_total counter +go_godebug_non_default_behavior_randautoseed_events_total 0 +# HELP go_godebug_non_default_behavior_tarinsecurepath_events_total The number of non-default behaviors executed by the archive/tar package due to a non-default GODEBUG=tarinsecurepath=... setting. +# TYPE go_godebug_non_default_behavior_tarinsecurepath_events_total counter +go_godebug_non_default_behavior_tarinsecurepath_events_total 0 +# HELP go_godebug_non_default_behavior_tlsmaxrsasize_events_total The number of non-default behaviors executed by the crypto/tls package due to a non-default GODEBUG=tlsmaxrsasize=... setting. +# TYPE go_godebug_non_default_behavior_tlsmaxrsasize_events_total counter +go_godebug_non_default_behavior_tlsmaxrsasize_events_total 0 +# HELP go_godebug_non_default_behavior_x509sha1_events_total The number of non-default behaviors executed by the crypto/x509 package due to a non-default GODEBUG=x509sha1=... setting. +# TYPE go_godebug_non_default_behavior_x509sha1_events_total counter +go_godebug_non_default_behavior_x509sha1_events_total 0 +# HELP go_godebug_non_default_behavior_x509usefallbackroots_events_total The number of non-default behaviors executed by the crypto/x509 package due to a non-default GODEBUG=x509usefallbackroots=... setting. +# TYPE go_godebug_non_default_behavior_x509usefallbackroots_events_total counter +go_godebug_non_default_behavior_x509usefallbackroots_events_total 0 +# HELP go_godebug_non_default_behavior_zipinsecurepath_events_total The number of non-default behaviors executed by the archive/zip package due to a non-default GODEBUG=zipinsecurepath=... setting. +# TYPE go_godebug_non_default_behavior_zipinsecurepath_events_total counter +go_godebug_non_default_behavior_zipinsecurepath_events_total 0 # HELP go_goroutines Number of goroutines that currently exist. 
# TYPE go_goroutines gauge -go_goroutines 52 +go_goroutines 51 # HELP go_info Information about the Go environment. # TYPE go_info gauge -go_info{version="go1.20.7"} 1 +go_info{version="go1.21.5"} 1 # HELP go_memory_classes_heap_free_bytes Memory that is completely free and eligible to be returned to the underlying system, but has not been. This metric is the runtime's estimate of free address space that is backed by physical memory. # TYPE go_memory_classes_heap_free_bytes gauge -go_memory_classes_heap_free_bytes 794624 +go_memory_classes_heap_free_bytes 5.3248e+06 # HELP go_memory_classes_heap_objects_bytes Memory occupied by live objects and dead objects that have not yet been marked free by the garbage collector. # TYPE go_memory_classes_heap_objects_bytes gauge -go_memory_classes_heap_objects_bytes 1.1433752e+07 +go_memory_classes_heap_objects_bytes 8.269304e+06 # HELP go_memory_classes_heap_released_bytes Memory that is completely free and has been returned to the underlying system. This metric is the runtime's estimate of free address space that is still mapped into the process, but is not backed by physical memory. # TYPE go_memory_classes_heap_released_bytes gauge -go_memory_classes_heap_released_bytes 4.128768e+06 -# HELP go_memory_classes_heap_stacks_bytes Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use. +go_memory_classes_heap_released_bytes 2.015232e+06 +# HELP go_memory_classes_heap_stacks_bytes Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use. Currently, this represents all stack memory for goroutines. It also includes all OS thread stacks in non-cgo programs. Note that stacks may be allocated differently in the future, and this may change. 
# TYPE go_memory_classes_heap_stacks_bytes gauge -go_memory_classes_heap_stacks_bytes 1.245184e+06 +go_memory_classes_heap_stacks_bytes 1.835008e+06 # HELP go_memory_classes_heap_unused_bytes Memory that is reserved for heap objects but is not currently used to hold heap objects. # TYPE go_memory_classes_heap_unused_bytes gauge -go_memory_classes_heap_unused_bytes 3.369192e+06 +go_memory_classes_heap_unused_bytes 3.527176e+06 # HELP go_memory_classes_metadata_mcache_free_bytes Memory that is reserved for runtime mcache structures, but not in-use. # TYPE go_memory_classes_metadata_mcache_free_bytes gauge -go_memory_classes_metadata_mcache_free_bytes 6000 +go_memory_classes_metadata_mcache_free_bytes 12000 # HELP go_memory_classes_metadata_mcache_inuse_bytes Memory that is occupied by runtime mcache structures that are currently being used. # TYPE go_memory_classes_metadata_mcache_inuse_bytes gauge -go_memory_classes_metadata_mcache_inuse_bytes 9600 +go_memory_classes_metadata_mcache_inuse_bytes 19200 # HELP go_memory_classes_metadata_mspan_free_bytes Memory that is reserved for runtime mspan structures, but not in-use. # TYPE go_memory_classes_metadata_mspan_free_bytes gauge -go_memory_classes_metadata_mspan_free_bytes 19040 +go_memory_classes_metadata_mspan_free_bytes 23016 # HELP go_memory_classes_metadata_mspan_inuse_bytes Memory that is occupied by runtime mspan structures that are currently being used. # TYPE go_memory_classes_metadata_mspan_inuse_bytes gauge -go_memory_classes_metadata_mspan_inuse_bytes 209440 +go_memory_classes_metadata_mspan_inuse_bytes 254016 # HELP go_memory_classes_metadata_other_bytes Memory that is reserved for or used to hold runtime metadata. # TYPE go_memory_classes_metadata_other_bytes gauge -go_memory_classes_metadata_other_bytes 8.790776e+06 -# HELP go_memory_classes_os_stacks_bytes Stack memory allocated by the underlying operating system. 
+go_memory_classes_metadata_other_bytes 4.555416e+06 +# HELP go_memory_classes_os_stacks_bytes Stack memory allocated by the underlying operating system. In non-cgo programs this metric is currently zero. This may change in the future.In cgo programs this metric includes OS thread stacks allocated directly from the OS. Currently, this only accounts for one stack in c-shared and c-archive build modes, and other sources of stacks from the OS are not measured. This too may change in the future. # TYPE go_memory_classes_os_stacks_bytes gauge go_memory_classes_os_stacks_bytes 0 # HELP go_memory_classes_other_bytes Memory used by execution trace buffers, structures for debugging the runtime, finalizer and profiler specials, and more. # TYPE go_memory_classes_other_bytes gauge -go_memory_classes_other_bytes 1.692383e+06 +go_memory_classes_other_bytes 2.958809e+06 # HELP go_memory_classes_profiling_buckets_bytes Memory that is used by the stack trace hash map used for profiling. # TYPE go_memory_classes_profiling_buckets_bytes gauge -go_memory_classes_profiling_buckets_bytes 1.459649e+06 +go_memory_classes_profiling_buckets_bytes 1.456271e+06 # HELP go_memory_classes_total_bytes All memory mapped by the Go runtime into the current process as read-write. Note that this does not include memory mapped by code called via cgo or via the syscall package. Sum of all metrics in /memory/classes. # TYPE go_memory_classes_total_bytes gauge -go_memory_classes_total_bytes 3.3158408e+07 +go_memory_classes_total_bytes 3.0250248e+07 # HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. # TYPE go_memstats_alloc_bytes gauge -go_memstats_alloc_bytes 1.1433752e+07 +go_memstats_alloc_bytes 8.269304e+06 # HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. 
# TYPE go_memstats_alloc_bytes_total counter -go_memstats_alloc_bytes_total 2.2809216e+07 +go_memstats_alloc_bytes_total 2.6836248e+07 # HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. # TYPE go_memstats_buck_hash_sys_bytes gauge -go_memstats_buck_hash_sys_bytes 1.459649e+06 +go_memstats_buck_hash_sys_bytes 1.456271e+06 # HELP go_memstats_frees_total Total number of frees. # TYPE go_memstats_frees_total counter -go_memstats_frees_total 97520 +go_memstats_frees_total 136180 # HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. # TYPE go_memstats_gc_sys_bytes gauge -go_memstats_gc_sys_bytes 8.790776e+06 +go_memstats_gc_sys_bytes 4.555416e+06 # HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. # TYPE go_memstats_heap_alloc_bytes gauge -go_memstats_heap_alloc_bytes 1.1433752e+07 +go_memstats_heap_alloc_bytes 8.269304e+06 # HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. # TYPE go_memstats_heap_idle_bytes gauge -go_memstats_heap_idle_bytes 4.923392e+06 +go_memstats_heap_idle_bytes 7.340032e+06 # HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. # TYPE go_memstats_heap_inuse_bytes gauge -go_memstats_heap_inuse_bytes 1.4802944e+07 +go_memstats_heap_inuse_bytes 1.179648e+07 # HELP go_memstats_heap_objects Number of allocated objects. # TYPE go_memstats_heap_objects gauge -go_memstats_heap_objects 45166 +go_memstats_heap_objects 24224 # HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. # TYPE go_memstats_heap_released_bytes gauge -go_memstats_heap_released_bytes 4.128768e+06 +go_memstats_heap_released_bytes 2.015232e+06 # HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. 
# TYPE go_memstats_heap_sys_bytes gauge -go_memstats_heap_sys_bytes 1.9726336e+07 +go_memstats_heap_sys_bytes 1.9136512e+07 # HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. # TYPE go_memstats_last_gc_time_seconds gauge -go_memstats_last_gc_time_seconds 1.6987525748939798e+09 +go_memstats_last_gc_time_seconds 1.7048971623253e+09 # HELP go_memstats_lookups_total Total number of pointer lookups. # TYPE go_memstats_lookups_total counter go_memstats_lookups_total 0 # HELP go_memstats_mallocs_total Total number of mallocs. # TYPE go_memstats_mallocs_total counter -go_memstats_mallocs_total 142686 +go_memstats_mallocs_total 160404 # HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. # TYPE go_memstats_mcache_inuse_bytes gauge -go_memstats_mcache_inuse_bytes 9600 +go_memstats_mcache_inuse_bytes 19200 # HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. # TYPE go_memstats_mcache_sys_bytes gauge -go_memstats_mcache_sys_bytes 15600 +go_memstats_mcache_sys_bytes 31200 # HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. # TYPE go_memstats_mspan_inuse_bytes gauge -go_memstats_mspan_inuse_bytes 209440 +go_memstats_mspan_inuse_bytes 254016 # HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. # TYPE go_memstats_mspan_sys_bytes gauge -go_memstats_mspan_sys_bytes 228480 +go_memstats_mspan_sys_bytes 277032 # HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. # TYPE go_memstats_next_gc_bytes gauge -go_memstats_next_gc_bytes 1.8213232e+07 +go_memstats_next_gc_bytes 1.7035896e+07 # HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. 
# TYPE go_memstats_other_sys_bytes gauge -go_memstats_other_sys_bytes 1.692383e+06 +go_memstats_other_sys_bytes 2.958809e+06 # HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. # TYPE go_memstats_stack_inuse_bytes gauge -go_memstats_stack_inuse_bytes 1.245184e+06 +go_memstats_stack_inuse_bytes 1.835008e+06 # HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. # TYPE go_memstats_stack_sys_bytes gauge -go_memstats_stack_sys_bytes 1.245184e+06 +go_memstats_stack_sys_bytes 1.835008e+06 # HELP go_memstats_sys_bytes Number of bytes obtained from system. # TYPE go_memstats_sys_bytes gauge -go_memstats_sys_bytes 3.3158408e+07 +go_memstats_sys_bytes 3.0250248e+07 # HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. # TYPE go_sched_gomaxprocs_threads gauge -go_sched_gomaxprocs_threads 8 +go_sched_gomaxprocs_threads 16 # HELP go_sched_goroutines_goroutines Count of live goroutines. # TYPE go_sched_goroutines_goroutines gauge -go_sched_goroutines_goroutines 52 -# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. +go_sched_goroutines_goroutines 51 +# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. Bucket counts increase monotonically. 
# TYPE go_sched_latencies_seconds histogram -go_sched_latencies_seconds_bucket{le="6.399999999999999e-08"} 612 -go_sched_latencies_seconds_bucket{le="6.399999999999999e-07"} 648 -go_sched_latencies_seconds_bucket{le="7.167999999999999e-06"} 691 -go_sched_latencies_seconds_bucket{le="8.191999999999999e-05"} 788 -go_sched_latencies_seconds_bucket{le="0.0009175039999999999"} 823 -go_sched_latencies_seconds_bucket{le="0.010485759999999998"} 825 -go_sched_latencies_seconds_bucket{le="0.11744051199999998"} 825 -go_sched_latencies_seconds_bucket{le="+Inf"} 825 -go_sched_latencies_seconds_sum 0.005427328 -go_sched_latencies_seconds_count 825 +go_sched_latencies_seconds_bucket{le="6.399999999999999e-08"} 1278 +go_sched_latencies_seconds_bucket{le="6.399999999999999e-07"} 1370 +go_sched_latencies_seconds_bucket{le="7.167999999999999e-06"} 1495 +go_sched_latencies_seconds_bucket{le="8.191999999999999e-05"} 1738 +go_sched_latencies_seconds_bucket{le="0.0009175039999999999"} 1803 +go_sched_latencies_seconds_bucket{le="0.010485759999999998"} 1804 +go_sched_latencies_seconds_bucket{le="0.11744051199999998"} 1804 +go_sched_latencies_seconds_bucket{le="+Inf"} 1804 +go_sched_latencies_seconds_sum 0.008070016 +go_sched_latencies_seconds_count 1804 # HELP go_sync_mutex_wait_total_seconds_total Approximate cumulative time goroutines have spent blocked on a sync.Mutex or sync.RWMutex. This metric is useful for identifying global changes in lock contention. Collect a mutex or block profile using the runtime/pprof package for more detailed contention data. # TYPE go_sync_mutex_wait_total_seconds_total counter -go_sync_mutex_wait_total_seconds_total 0 +go_sync_mutex_wait_total_seconds_total 0.000555248 # HELP go_threads Number of OS threads created. # TYPE go_threads gauge -go_threads 10 +go_threads 18 # HELP hidden_metrics_total [BETA] The count of hidden metrics. 
# TYPE hidden_metrics_total counter hidden_metrics_total 0 @@ -338,86 +416,86 @@ kubeproxy_network_programming_duration_seconds_bucket{le="0"} 0 kubeproxy_network_programming_duration_seconds_bucket{le="0.25"} 0 kubeproxy_network_programming_duration_seconds_bucket{le="0.5"} 1 kubeproxy_network_programming_duration_seconds_bucket{le="1"} 1 -kubeproxy_network_programming_duration_seconds_bucket{le="2"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="3"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="4"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="5"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="6"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="7"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="8"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="9"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="10"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="11"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="12"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="13"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="14"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="15"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="16"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="17"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="18"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="19"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="20"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="21"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="22"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="23"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="24"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="25"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="26"} 3 
-kubeproxy_network_programming_duration_seconds_bucket{le="27"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="28"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="29"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="30"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="31"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="32"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="33"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="34"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="35"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="36"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="37"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="38"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="39"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="40"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="41"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="42"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="43"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="44"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="45"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="46"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="47"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="48"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="49"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="50"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="51"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="52"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="53"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="54"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="55"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="56"} 3 
-kubeproxy_network_programming_duration_seconds_bucket{le="57"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="58"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="59"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="60"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="65"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="70"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="75"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="80"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="85"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="90"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="95"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="100"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="105"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="110"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="115"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="120"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="150"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="180"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="210"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="240"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="270"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="300"} 3 -kubeproxy_network_programming_duration_seconds_bucket{le="+Inf"} 3 -kubeproxy_network_programming_duration_seconds_sum 3.693176402 -kubeproxy_network_programming_duration_seconds_count 3 +kubeproxy_network_programming_duration_seconds_bucket{le="2"} 1 +kubeproxy_network_programming_duration_seconds_bucket{le="3"} 1 +kubeproxy_network_programming_duration_seconds_bucket{le="4"} 1 +kubeproxy_network_programming_duration_seconds_bucket{le="5"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="6"} 2 
+kubeproxy_network_programming_duration_seconds_bucket{le="7"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="8"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="9"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="10"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="11"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="12"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="13"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="14"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="15"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="16"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="17"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="18"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="19"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="20"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="21"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="22"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="23"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="24"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="25"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="26"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="27"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="28"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="29"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="30"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="31"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="32"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="33"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="34"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="35"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="36"} 2 
+kubeproxy_network_programming_duration_seconds_bucket{le="37"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="38"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="39"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="40"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="41"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="42"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="43"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="44"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="45"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="46"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="47"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="48"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="49"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="50"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="51"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="52"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="53"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="54"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="55"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="56"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="57"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="58"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="59"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="60"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="65"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="70"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="75"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="80"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="85"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="90"} 2 
+kubeproxy_network_programming_duration_seconds_bucket{le="95"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="100"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="105"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="110"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="115"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="120"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="150"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="180"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="210"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="240"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="270"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="300"} 2 +kubeproxy_network_programming_duration_seconds_bucket{le="+Inf"} 2 +kubeproxy_network_programming_duration_seconds_sum 4.633884953 +kubeproxy_network_programming_duration_seconds_count 2 # HELP kubeproxy_sync_full_proxy_rules_duration_seconds [ALPHA] SyncProxyRules latency in seconds for full resyncs # TYPE kubeproxy_sync_full_proxy_rules_duration_seconds histogram kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.001"} 0 @@ -427,7 +505,7 @@ kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.008"} 0 kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.016"} 0 kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.032"} 0 kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.064"} 0 -kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.128"} 1 +kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.128"} 2 kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.256"} 2 kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="0.512"} 2 kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="1.024"} 2 @@ -436,7 +514,7 @@ kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="4.096"} 2 
kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="8.192"} 2 kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="16.384"} 2 kubeproxy_sync_full_proxy_rules_duration_seconds_bucket{le="+Inf"} 2 -kubeproxy_sync_full_proxy_rules_duration_seconds_sum 0.29591801 +kubeproxy_sync_full_proxy_rules_duration_seconds_sum 0.24495503000000002 kubeproxy_sync_full_proxy_rules_duration_seconds_count 2 # HELP kubeproxy_sync_partial_proxy_rules_duration_seconds [ALPHA] SyncProxyRules latency in seconds for partial resyncs # TYPE kubeproxy_sync_partial_proxy_rules_duration_seconds histogram @@ -444,46 +522,46 @@ kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.001"} 0 kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.002"} 0 kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.004"} 0 kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.008"} 0 -kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.016"} 3 -kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.032"} 4 -kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.064"} 4 -kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.128"} 4 -kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.256"} 4 -kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.512"} 4 -kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="1.024"} 4 -kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="2.048"} 4 -kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="4.096"} 4 -kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="8.192"} 4 -kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="16.384"} 4 -kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="+Inf"} 4 -kubeproxy_sync_partial_proxy_rules_duration_seconds_sum 0.053586814 -kubeproxy_sync_partial_proxy_rules_duration_seconds_count 4 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.016"} 
0 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.032"} 1 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.064"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.128"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.256"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="0.512"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="1.024"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="2.048"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="4.096"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="8.192"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="16.384"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_bucket{le="+Inf"} 2 +kubeproxy_sync_partial_proxy_rules_duration_seconds_sum 0.052629146 +kubeproxy_sync_partial_proxy_rules_duration_seconds_count 2 # HELP kubeproxy_sync_proxy_rules_duration_seconds [ALPHA] SyncProxyRules latency in seconds # TYPE kubeproxy_sync_proxy_rules_duration_seconds histogram kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.001"} 0 kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.002"} 0 kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.004"} 0 kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.008"} 0 -kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.016"} 3 -kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.032"} 4 -kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.064"} 4 -kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.128"} 5 -kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.256"} 6 -kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.512"} 6 -kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="1.024"} 6 -kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="2.048"} 6 -kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="4.096"} 6 
-kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="8.192"} 6 -kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="16.384"} 6 -kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="+Inf"} 6 -kubeproxy_sync_proxy_rules_duration_seconds_sum 0.349493379 -kubeproxy_sync_proxy_rules_duration_seconds_count 6 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.016"} 0 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.032"} 1 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.064"} 2 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.128"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.256"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="0.512"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="1.024"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="2.048"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="4.096"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="8.192"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="16.384"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_bucket{le="+Inf"} 4 +kubeproxy_sync_proxy_rules_duration_seconds_sum 0.297575151 +kubeproxy_sync_proxy_rules_duration_seconds_count 4 # HELP kubeproxy_sync_proxy_rules_endpoint_changes_pending [ALPHA] Pending proxy rules Endpoint changes # TYPE kubeproxy_sync_proxy_rules_endpoint_changes_pending gauge kubeproxy_sync_proxy_rules_endpoint_changes_pending 0 # HELP kubeproxy_sync_proxy_rules_endpoint_changes_total [ALPHA] Cumulative proxy rules Endpoint changes # TYPE kubeproxy_sync_proxy_rules_endpoint_changes_total counter -kubeproxy_sync_proxy_rules_endpoint_changes_total 6 +kubeproxy_sync_proxy_rules_endpoint_changes_total 8 # HELP kubeproxy_sync_proxy_rules_iptables_last [ALPHA] Number of iptables rules written by kube-proxy in last sync # TYPE kubeproxy_sync_proxy_rules_iptables_last gauge kubeproxy_sync_proxy_rules_iptables_last{table="filter"} 4 @@ -500,10 +578,10 @@ 
kubeproxy_sync_proxy_rules_iptables_total{table="filter"} 4 kubeproxy_sync_proxy_rules_iptables_total{table="nat"} 34 # HELP kubeproxy_sync_proxy_rules_last_queued_timestamp_seconds [ALPHA] The last time a sync of proxy rules was queued # TYPE kubeproxy_sync_proxy_rules_last_queued_timestamp_seconds gauge -kubeproxy_sync_proxy_rules_last_queued_timestamp_seconds 1.698752422359788e+09 +kubeproxy_sync_proxy_rules_last_queued_timestamp_seconds 1.704894799314416e+09 # HELP kubeproxy_sync_proxy_rules_last_timestamp_seconds [ALPHA] The last time proxy rules were successfully synced # TYPE kubeproxy_sync_proxy_rules_last_timestamp_seconds gauge -kubeproxy_sync_proxy_rules_last_timestamp_seconds 1.698752422369931e+09 +kubeproxy_sync_proxy_rules_last_timestamp_seconds 1.704894799332805e+09 # HELP kubeproxy_sync_proxy_rules_no_local_endpoints_total [ALPHA] Number of services with a Local traffic policy and no endpoints # TYPE kubeproxy_sync_proxy_rules_no_local_endpoints_total gauge kubeproxy_sync_proxy_rules_no_local_endpoints_total{traffic_policy="external"} 0 @@ -513,14 +591,14 @@ kubeproxy_sync_proxy_rules_no_local_endpoints_total{traffic_policy="internal"} 0 kubeproxy_sync_proxy_rules_service_changes_pending 0 # HELP kubeproxy_sync_proxy_rules_service_changes_total [ALPHA] Cumulative proxy rules Service changes # TYPE kubeproxy_sync_proxy_rules_service_changes_total counter -kubeproxy_sync_proxy_rules_service_changes_total 4 +kubeproxy_sync_proxy_rules_service_changes_total 12 # HELP kubernetes_build_info [ALPHA] A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running. 
# TYPE kubernetes_build_info gauge -kubernetes_build_info{build_date="2023-08-15T21:24:51Z",compiler="gc",git_commit="855e7c48de7388eb330da0f8d9d2394ee818fb8d",git_tree_state="clean",git_version="v1.28.0",go_version="go1.20.7",major="1",minor="28",platform="linux/amd64"} 1 +kubernetes_build_info{build_date="2023-12-14T19:18:17Z",compiler="gc",git_commit="3f7a50f38688eb332e2a1b013678c6435d539ae6",git_tree_state="clean",git_version="v1.29.0",go_version="go1.21.5",major="1",minor="29",platform="linux/amd64"} 1 # HELP kubernetes_feature_enabled [BETA] This metric records the data about the stage and enablement of a k8s feature. # TYPE kubernetes_feature_enabled gauge -kubernetes_feature_enabled{name="APIListChunking",stage="BETA"} 1 -kubernetes_feature_enabled{name="APIPriorityAndFairness",stage="BETA"} 1 +kubernetes_feature_enabled{name="APIListChunking",stage=""} 1 +kubernetes_feature_enabled{name="APIPriorityAndFairness",stage=""} 1 kubernetes_feature_enabled{name="APIResponseCompression",stage="BETA"} 1 kubernetes_feature_enabled{name="APISelfSubjectReview",stage=""} 1 kubernetes_feature_enabled{name="APIServerIdentity",stage="BETA"} 1 @@ -529,6 +607,7 @@ kubernetes_feature_enabled{name="AdmissionWebhookMatchConditions",stage="BETA"} kubernetes_feature_enabled{name="AggregatedDiscoveryEndpoint",stage="BETA"} 1 kubernetes_feature_enabled{name="AllAlpha",stage="ALPHA"} 0 kubernetes_feature_enabled{name="AllBeta",stage="BETA"} 0 +kubernetes_feature_enabled{name="AllowServiceLBStatusOnNonLB",stage="DEPRECATED"} 0 kubernetes_feature_enabled{name="AnyVolumeDataSource",stage="BETA"} 1 kubernetes_feature_enabled{name="AppArmor",stage="BETA"} 1 kubernetes_feature_enabled{name="CPUManager",stage=""} 1 @@ -539,27 +618,26 @@ kubernetes_feature_enabled{name="CRDValidationRatcheting",stage="ALPHA"} 0 kubernetes_feature_enabled{name="CSIMigrationAzureFile",stage=""} 1 kubernetes_feature_enabled{name="CSIMigrationPortworx",stage="BETA"} 0 
kubernetes_feature_enabled{name="CSIMigrationRBD",stage="DEPRECATED"} 0 -kubernetes_feature_enabled{name="CSIMigrationvSphere",stage=""} 1 -kubernetes_feature_enabled{name="CSINodeExpandSecret",stage="BETA"} 1 +kubernetes_feature_enabled{name="CSINodeExpandSecret",stage=""} 1 kubernetes_feature_enabled{name="CSIVolumeHealth",stage="ALPHA"} 0 kubernetes_feature_enabled{name="CloudControllerManagerWebhook",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="CloudDualStackNodeIPs",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CloudDualStackNodeIPs",stage="BETA"} 1 kubernetes_feature_enabled{name="ClusterTrustBundle",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ClusterTrustBundleProjection",stage="ALPHA"} 0 kubernetes_feature_enabled{name="ComponentSLIs",stage="BETA"} 1 kubernetes_feature_enabled{name="ConsistentHTTPGetHandlers",stage=""} 1 kubernetes_feature_enabled{name="ConsistentListFromCache",stage="ALPHA"} 0 kubernetes_feature_enabled{name="ContainerCheckpoint",stage="ALPHA"} 0 kubernetes_feature_enabled{name="ContextualLogging",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="CronJobTimeZone",stage=""} 1 kubernetes_feature_enabled{name="CronJobsScheduledAnnotation",stage="BETA"} 1 kubernetes_feature_enabled{name="CrossNamespaceVolumeDataSource",stage="ALPHA"} 0 kubernetes_feature_enabled{name="CustomCPUCFSQuotaPeriod",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="CustomResourceValidationExpressions",stage="BETA"} 1 +kubernetes_feature_enabled{name="CustomResourceValidationExpressions",stage=""} 1 kubernetes_feature_enabled{name="DefaultHostNetworkHostPortsInPodTemplates",stage="DEPRECATED"} 0 -kubernetes_feature_enabled{name="DevicePluginCDIDevices",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="DisableCloudProviders",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="DisableKubeletCloudCredentialProviders",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="DownwardAPIHugePages",stage=""} 1 
+kubernetes_feature_enabled{name="DevicePluginCDIDevices",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableCloudProviders",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableKubeletCloudCredentialProviders",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableNodeKubeProxyVersion",stage="ALPHA"} 0 kubernetes_feature_enabled{name="DynamicResourceAllocation",stage="ALPHA"} 0 kubernetes_feature_enabled{name="EfficientWatchResumption",stage=""} 1 kubernetes_feature_enabled{name="ElasticIndexedJob",stage="BETA"} 1 @@ -567,13 +645,13 @@ kubernetes_feature_enabled{name="EventedPLEG",stage="BETA"} 0 kubernetes_feature_enabled{name="ExecProbeTimeout",stage=""} 1 kubernetes_feature_enabled{name="ExpandedDNSConfig",stage=""} 1 kubernetes_feature_enabled{name="ExperimentalHostUserNamespaceDefaulting",stage="DEPRECATED"} 0 -kubernetes_feature_enabled{name="GRPCContainerProbe",stage=""} 1 kubernetes_feature_enabled{name="GracefulNodeShutdown",stage="BETA"} 1 kubernetes_feature_enabled{name="GracefulNodeShutdownBasedOnPodPriority",stage="BETA"} 1 kubernetes_feature_enabled{name="HPAContainerMetrics",stage="BETA"} 1 kubernetes_feature_enabled{name="HPAScaleToZero",stage="ALPHA"} 0 kubernetes_feature_enabled{name="HonorPVReclaimPolicy",stage="ALPHA"} 0 kubernetes_feature_enabled{name="IPTablesOwnershipCleanup",stage=""} 1 +kubernetes_feature_enabled{name="ImageMaximumGCAge",stage="ALPHA"} 0 kubernetes_feature_enabled{name="InPlacePodVerticalScaling",stage="ALPHA"} 0 kubernetes_feature_enabled{name="InTreePluginAWSUnregister",stage="ALPHA"} 0 kubernetes_feature_enabled{name="InTreePluginAzureDiskUnregister",stage="ALPHA"} 0 @@ -583,15 +661,13 @@ kubernetes_feature_enabled{name="InTreePluginOpenStackUnregister",stage="ALPHA"} kubernetes_feature_enabled{name="InTreePluginPortworxUnregister",stage="ALPHA"} 0 kubernetes_feature_enabled{name="InTreePluginRBDUnregister",stage="DEPRECATED"} 0 kubernetes_feature_enabled{name="InTreePluginvSphereUnregister",stage="ALPHA"} 0 
-kubernetes_feature_enabled{name="JobBackoffLimitPerIndex",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="JobMutableNodeSchedulingDirectives",stage=""} 1 +kubernetes_feature_enabled{name="JobBackoffLimitPerIndex",stage="BETA"} 1 kubernetes_feature_enabled{name="JobPodFailurePolicy",stage="BETA"} 1 -kubernetes_feature_enabled{name="JobPodReplacementPolicy",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="JobReadyPods",stage="BETA"} 1 -kubernetes_feature_enabled{name="JobTrackingWithFinalizers",stage=""} 1 -kubernetes_feature_enabled{name="KMSv1",stage="DEPRECATED"} 1 -kubernetes_feature_enabled{name="KMSv2",stage="BETA"} 1 -kubernetes_feature_enabled{name="KMSv2KDF",stage="BETA"} 0 +kubernetes_feature_enabled{name="JobPodReplacementPolicy",stage="BETA"} 1 +kubernetes_feature_enabled{name="JobReadyPods",stage=""} 1 +kubernetes_feature_enabled{name="KMSv1",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="KMSv2",stage=""} 1 +kubernetes_feature_enabled{name="KMSv2KDF",stage=""} 1 kubernetes_feature_enabled{name="KubeProxyDrainingTerminatingNodes",stage="ALPHA"} 0 kubernetes_feature_enabled{name="KubeletCgroupDriverFromCRI",stage="ALPHA"} 0 kubernetes_feature_enabled{name="KubeletInUserNamespace",stage="ALPHA"} 0 @@ -599,103 +675,114 @@ kubernetes_feature_enabled{name="KubeletPodResources",stage=""} 1 kubernetes_feature_enabled{name="KubeletPodResourcesDynamicResources",stage="ALPHA"} 0 kubernetes_feature_enabled{name="KubeletPodResourcesGet",stage="ALPHA"} 0 kubernetes_feature_enabled{name="KubeletPodResourcesGetAllocatable",stage=""} 1 +kubernetes_feature_enabled{name="KubeletSeparateDiskGC",stage="ALPHA"} 0 kubernetes_feature_enabled{name="KubeletTracing",stage="BETA"} 1 -kubernetes_feature_enabled{name="LegacyServiceAccountTokenCleanUp",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="LegacyServiceAccountTokenNoAutoGeneration",stage=""} 1 +kubernetes_feature_enabled{name="LegacyServiceAccountTokenCleanUp",stage="BETA"} 1 
kubernetes_feature_enabled{name="LegacyServiceAccountTokenTracking",stage=""} 1 +kubernetes_feature_enabled{name="LoadBalancerIPMode",stage="ALPHA"} 0 kubernetes_feature_enabled{name="LocalStorageCapacityIsolationFSQuotaMonitoring",stage="ALPHA"} 0 kubernetes_feature_enabled{name="LogarithmicScaleDown",stage="BETA"} 1 kubernetes_feature_enabled{name="LoggingAlphaOptions",stage="ALPHA"} 0 kubernetes_feature_enabled{name="LoggingBetaOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="MatchLabelKeysInPodAffinity",stage="ALPHA"} 0 kubernetes_feature_enabled{name="MatchLabelKeysInPodTopologySpread",stage="BETA"} 1 kubernetes_feature_enabled{name="MaxUnavailableStatefulSet",stage="ALPHA"} 0 kubernetes_feature_enabled{name="MemoryManager",stage="BETA"} 1 kubernetes_feature_enabled{name="MemoryQoS",stage="ALPHA"} 0 kubernetes_feature_enabled{name="MinDomainsInPodTopologySpread",stage="BETA"} 1 kubernetes_feature_enabled{name="MinimizeIPTablesRestore",stage=""} 1 -kubernetes_feature_enabled{name="MultiCIDRRangeAllocator",stage="ALPHA"} 0 kubernetes_feature_enabled{name="MultiCIDRServiceAllocator",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="NFTablesProxyMode",stage="ALPHA"} 0 kubernetes_feature_enabled{name="NewVolumeManagerReconstruction",stage="BETA"} 1 kubernetes_feature_enabled{name="NodeInclusionPolicyInPodTopologySpread",stage="BETA"} 1 kubernetes_feature_enabled{name="NodeLogQuery",stage="ALPHA"} 0 kubernetes_feature_enabled{name="NodeOutOfServiceVolumeDetach",stage=""} 1 kubernetes_feature_enabled{name="NodeSwap",stage="BETA"} 0 kubernetes_feature_enabled{name="OpenAPIEnums",stage="BETA"} 1 -kubernetes_feature_enabled{name="OpenAPIV3",stage=""} 1 kubernetes_feature_enabled{name="PDBUnhealthyPodEvictionPolicy",stage="BETA"} 1 -kubernetes_feature_enabled{name="PersistentVolumeLastPhaseTransitionTime",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PersistentVolumeLastPhaseTransitionTime",stage="BETA"} 1 
kubernetes_feature_enabled{name="PodAndContainerStatsFromCRI",stage="ALPHA"} 0 kubernetes_feature_enabled{name="PodDeletionCost",stage="BETA"} 1 kubernetes_feature_enabled{name="PodDisruptionConditions",stage="BETA"} 1 -kubernetes_feature_enabled{name="PodHostIPs",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PodHostIPs",stage="BETA"} 1 kubernetes_feature_enabled{name="PodIndexLabel",stage="BETA"} 1 -kubernetes_feature_enabled{name="PodReadyToStartContainersCondition",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PodLifecycleSleepAction",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PodReadyToStartContainersCondition",stage="BETA"} 1 kubernetes_feature_enabled{name="PodSchedulingReadiness",stage="BETA"} 1 -kubernetes_feature_enabled{name="ProbeTerminationGracePeriod",stage=""} 1 kubernetes_feature_enabled{name="ProcMountType",stage="ALPHA"} 0 kubernetes_feature_enabled{name="ProxyTerminatingEndpoints",stage=""} 1 kubernetes_feature_enabled{name="QOSReserved",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="ReadWriteOncePod",stage="BETA"} 1 +kubernetes_feature_enabled{name="ReadWriteOncePod",stage=""} 1 kubernetes_feature_enabled{name="RecoverVolumeExpansionFailure",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="RemainingItemCount",stage="BETA"} 1 +kubernetes_feature_enabled{name="RemainingItemCount",stage=""} 1 kubernetes_feature_enabled{name="RemoveSelfLink",stage=""} 1 -kubernetes_feature_enabled{name="RetroactiveDefaultStorageClass",stage=""} 1 kubernetes_feature_enabled{name="RotateKubeletServerCertificate",stage="BETA"} 1 +kubernetes_feature_enabled{name="RuntimeClassInImageCriApi",stage="ALPHA"} 0 kubernetes_feature_enabled{name="SELinuxMountReadWriteOncePod",stage="BETA"} 1 -kubernetes_feature_enabled{name="SchedulerQueueingHints",stage="BETA"} 1 -kubernetes_feature_enabled{name="SeccompDefault",stage=""} 1 +kubernetes_feature_enabled{name="SchedulerQueueingHints",stage="BETA"} 0 
kubernetes_feature_enabled{name="SecurityContextDeny",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="SeparateTaintEvictionController",stage="BETA"} 1 kubernetes_feature_enabled{name="ServerSideApply",stage=""} 1 kubernetes_feature_enabled{name="ServerSideFieldValidation",stage=""} 1 -kubernetes_feature_enabled{name="ServiceNodePortStaticSubrange",stage="BETA"} 1 -kubernetes_feature_enabled{name="SidecarContainers",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenJTI",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenNodeBinding",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenNodeBindingValidation",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenPodNodeInfo",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceNodePortStaticSubrange",stage=""} 1 +kubernetes_feature_enabled{name="SidecarContainers",stage="BETA"} 1 kubernetes_feature_enabled{name="SizeMemoryBackedVolumes",stage="BETA"} 1 -kubernetes_feature_enabled{name="SkipReadOnlyValidationGCE",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="SkipReadOnlyValidationGCE",stage="DEPRECATED"} 1 kubernetes_feature_enabled{name="StableLoadBalancerNodeSet",stage="BETA"} 1 kubernetes_feature_enabled{name="StatefulSetAutoDeletePVC",stage="BETA"} 1 kubernetes_feature_enabled{name="StatefulSetStartOrdinal",stage="BETA"} 1 kubernetes_feature_enabled{name="StorageVersionAPI",stage="ALPHA"} 0 kubernetes_feature_enabled{name="StorageVersionHash",stage="BETA"} 1 +kubernetes_feature_enabled{name="StructuredAuthenticationConfiguration",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="StructuredAuthorizationConfiguration",stage="ALPHA"} 0 kubernetes_feature_enabled{name="TopologyAwareHints",stage="BETA"} 1 -kubernetes_feature_enabled{name="TopologyManager",stage=""} 1 kubernetes_feature_enabled{name="TopologyManagerPolicyAlphaOptions",stage="ALPHA"} 0 kubernetes_feature_enabled{name="TopologyManagerPolicyBetaOptions",stage="BETA"} 1 
kubernetes_feature_enabled{name="TopologyManagerPolicyOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="TranslateStreamCloseWebsocketRequests",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="UnauthenticatedHTTP2DOSMitigation",stage="BETA"} 1 kubernetes_feature_enabled{name="UnknownVersionInteroperabilityProxy",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="UserNamespacesPodSecurityStandards",stage="ALPHA"} 0 kubernetes_feature_enabled{name="UserNamespacesSupport",stage="ALPHA"} 0 kubernetes_feature_enabled{name="ValidatingAdmissionPolicy",stage="BETA"} 0 +kubernetes_feature_enabled{name="VolumeAttributesClass",stage="ALPHA"} 0 kubernetes_feature_enabled{name="VolumeCapacityPriority",stage="ALPHA"} 0 kubernetes_feature_enabled{name="WatchBookmark",stage=""} 1 kubernetes_feature_enabled{name="WatchList",stage="ALPHA"} 0 kubernetes_feature_enabled{name="WinDSR",stage="ALPHA"} 0 kubernetes_feature_enabled{name="WinOverlay",stage="BETA"} 1 kubernetes_feature_enabled{name="WindowsHostNetwork",stage="ALPHA"} 1 +kubernetes_feature_enabled{name="ZeroLimitedNominalConcurrencyShares",stage="BETA"} 0 # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. # TYPE process_cpu_seconds_total counter -process_cpu_seconds_total 0.4 +process_cpu_seconds_total 1.06 # HELP process_max_fds Maximum number of open file descriptors. # TYPE process_max_fds gauge process_max_fds 1.048576e+06 # HELP process_open_fds Number of open file descriptors. # TYPE process_open_fds gauge -process_open_fds 12 +process_open_fds 11 # HELP process_resident_memory_bytes Resident memory size in bytes. # TYPE process_resident_memory_bytes gauge -process_resident_memory_bytes 4.6661632e+07 +process_resident_memory_bytes 6.4688128e+07 # HELP process_start_time_seconds Start time of the process since unix epoch in seconds. 
# TYPE process_start_time_seconds gauge -process_start_time_seconds 1.69875240824e+09 +process_start_time_seconds 1.70489479177e+09 # HELP process_virtual_memory_bytes Virtual memory size in bytes. # TYPE process_virtual_memory_bytes gauge -process_virtual_memory_bytes 7.85973248e+08 +process_virtual_memory_bytes 1.315946496e+09 # HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. # TYPE process_virtual_memory_max_bytes gauge process_virtual_memory_max_bytes 1.8446744073709552e+19 # HELP registered_metrics_total [BETA] The count of registered metrics broken by stability level and deprecation version. # TYPE registered_metrics_total counter -registered_metrics_total{deprecated_version="",stability_level="ALPHA"} 83 -registered_metrics_total{deprecated_version="",stability_level="BETA"} 6 -registered_metrics_total{deprecated_version="",stability_level="STABLE"} 3 +registered_metrics_total{deprecated_version="",stability_level="ALPHA"} 88 +registered_metrics_total{deprecated_version="",stability_level="BETA"} 4 +registered_metrics_total{deprecated_version="",stability_level="STABLE"} 5 # HELP rest_client_exec_plugin_certificate_rotation_age [ALPHA] Histogram of the number of seconds the last auth exec plugin client certificate lived before being rotated. If auth exec plugin client certificates are unused, histogram will contain no data. 
# TYPE rest_client_exec_plugin_certificate_rotation_age histogram rest_client_exec_plugin_certificate_rotation_age_bucket{le="600"} 0 @@ -730,7 +817,7 @@ rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443", rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="30"} 4 rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="60"} 4 rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="+Inf"} 4 -rest_client_rate_limiter_duration_seconds_sum{host="kind-control-plane:6443",verb="GET"} 1.1491e-05 +rest_client_rate_limiter_duration_seconds_sum{host="kind-control-plane:6443",verb="GET"} 1.4624e-05 rest_client_rate_limiter_duration_seconds_count{host="kind-control-plane:6443",verb="GET"} 4 rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="0.005"} 1 rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="0.025"} 1 @@ -745,7 +832,7 @@ rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443", rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="30"} 1 rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="60"} 1 rest_client_rate_limiter_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="+Inf"} 1 -rest_client_rate_limiter_duration_seconds_sum{host="kind-control-plane:6443",verb="POST"} 2.4383e-05 +rest_client_rate_limiter_duration_seconds_sum{host="kind-control-plane:6443",verb="POST"} 2.725e-06 rest_client_rate_limiter_duration_seconds_count{host="kind-control-plane:6443",verb="POST"} 1 # HELP rest_client_request_duration_seconds [ALPHA] Request latency in seconds. Broken down by verb, and host. 
# TYPE rest_client_request_duration_seconds histogram @@ -762,7 +849,7 @@ rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb= rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="30"} 4 rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="60"} 4 rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="GET",le="+Inf"} 4 -rest_client_request_duration_seconds_sum{host="kind-control-plane:6443",verb="GET"} 0.024854599 +rest_client_request_duration_seconds_sum{host="kind-control-plane:6443",verb="GET"} 0.025656880000000003 rest_client_request_duration_seconds_count{host="kind-control-plane:6443",verb="GET"} 4 rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="0.005"} 0 rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="0.025"} 1 @@ -777,7 +864,7 @@ rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb= rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="30"} 1 rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="60"} 1 rest_client_request_duration_seconds_bucket{host="kind-control-plane:6443",verb="POST",le="+Inf"} 1 -rest_client_request_duration_seconds_sum{host="kind-control-plane:6443",verb="POST"} 0.012964325 +rest_client_request_duration_seconds_sum{host="kind-control-plane:6443",verb="POST"} 0.018671026 rest_client_request_duration_seconds_count{host="kind-control-plane:6443",verb="POST"} 1 # HELP rest_client_request_size_bytes [ALPHA] Request size in bytes. Broken down by verb and host. 
# TYPE rest_client_request_size_bytes histogram @@ -811,7 +898,7 @@ rest_client_request_size_bytes_sum{host="kind-control-plane:6443",verb="POST"} 2 rest_client_request_size_bytes_count{host="kind-control-plane:6443",verb="POST"} 1 # HELP rest_client_requests_total [ALPHA] Number of HTTP requests, partitioned by status code, method, and host. # TYPE rest_client_requests_total counter -rest_client_requests_total{code="200",host="kind-control-plane:6443",method="GET"} 7 +rest_client_requests_total{code="200",host="kind-control-plane:6443",method="GET"} 22 rest_client_requests_total{code="201",host="kind-control-plane:6443",method="POST"} 1 # HELP rest_client_response_size_bytes [ALPHA] Response size in bytes. Broken down by verb and host. # TYPE rest_client_response_size_bytes histogram @@ -827,7 +914,7 @@ rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET" rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="4.194304e+06"} 4 rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="1.6777216e+07"} 4 rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="GET",le="+Inf"} 4 -rest_client_response_size_bytes_sum{host="kind-control-plane:6443",verb="GET"} 10992 +rest_client_response_size_bytes_sum{host="kind-control-plane:6443",verb="GET"} 10814 rest_client_response_size_bytes_count{host="kind-control-plane:6443",verb="GET"} 4 rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="64"} 0 rest_client_response_size_bytes_bucket{host="kind-control-plane:6443",verb="POST",le="256"} 0 diff --git a/metricbeat/module/kubernetes/proxy/_meta/testdata/docs.plain-expected.json b/metricbeat/module/kubernetes/proxy/_meta/testdata/docs.plain-expected.json index 6cc8117d2904..9c2ce7a5c274 100644 --- a/metricbeat/module/kubernetes/proxy/_meta/testdata/docs.plain-expected.json +++ 
b/metricbeat/module/kubernetes/proxy/_meta/testdata/docs.plain-expected.json @@ -26,6 +26,33 @@ "type": "kubernetes" } }, + { + "event": { + "dataset": "kubernetes.proxy", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "proxy": { + "client": { + "request": { + "count": 22 + } + }, + "code": "200", + "host": "kind-control-plane:6443", + "method": "GET" + } + }, + "metricset": { + "name": "proxy", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, { "event": { "dataset": "kubernetes.proxy", @@ -54,7 +81,7 @@ "8000000": 1 }, "count": 1, - "sum": 12964.325 + "sum": 18671.026 } }, "size": { @@ -114,94 +141,6 @@ "type": "kubernetes" } }, - { - "event": { - "dataset": "kubernetes.proxy", - "duration": 115000, - "module": "kubernetes" - }, - "kubernetes": { - "proxy": { - "client": { - "request": { - "duration": { - "us": { - "bucket": { - "+Inf": 4, - "100000": 4, - "1000000": 4, - "15000000": 4, - "2000000": 4, - "25000": 4, - "250000": 4, - "30000000": 4, - "4000000": 4, - "5000": 3, - "500000": 4, - "60000000": 4, - "8000000": 4 - }, - "count": 4, - "sum": 24854.599000000002 - } - }, - "size": { - "bytes": { - "bucket": { - "+Inf": 4, - "1024": 4, - "1048576": 4, - "16384": 4, - "16777216": 4, - "256": 4, - "262144": 4, - "4096": 4, - "4194304": 4, - "512": 4, - "64": 4, - "65536": 4 - }, - "count": 4, - "sum": 0 - } - } - }, - "response": { - "size": { - "bytes": { - "bucket": { - "+Inf": 4, - "1024": 0, - "1048576": 4, - "16384": 4, - "16777216": 4, - "256": 0, - "262144": 4, - "4096": 4, - "4194304": 4, - "512": 0, - "64": 0, - "65536": 4 - }, - "count": 4, - "sum": 10992 - } - } - } - }, - "host": "kind-control-plane:6443", - "verb": "GET" - } - }, - "metricset": { - "name": "proxy", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "kubernetes" - } - }, { "event": { "dataset": "kubernetes.proxy", @@ -212,26 +151,26 @@ "proxy": { "process": { "cpu": { - "sec": 
0 + "sec": 1 }, "fds": { "max": { "count": 1048576 }, "open": { - "count": 12 + "count": 11 } }, "memory": { "resident": { - "bytes": 46661632 + "bytes": 64688128 }, "virtual": { - "bytes": 785973248 + "bytes": 1315946496 } }, "started": { - "sec": 1698752408.24 + "sec": 1704894791.77 } }, "sync": { @@ -239,91 +178,91 @@ "duration": { "us": { "bucket": { - "+Inf": 3, + "+Inf": 2, "0": 0, "1000000": 1, - "10000000": 3, - "100000000": 3, - "105000000": 3, - "11000000": 3, - "110000000": 3, - "115000000": 3, - "12000000": 3, - "120000000": 3, - "13000000": 3, - "14000000": 3, - "15000000": 3, - "150000000": 3, - "16000000": 3, - "17000000": 3, - "18000000": 3, - "180000000": 3, - "19000000": 3, - "2000000": 3, - "20000000": 3, - "21000000": 3, - "210000000": 3, - "22000000": 3, - "23000000": 3, - "24000000": 3, - "240000000": 3, + "10000000": 2, + "100000000": 2, + "105000000": 2, + "11000000": 2, + "110000000": 2, + "115000000": 2, + "12000000": 2, + "120000000": 2, + "13000000": 2, + "14000000": 2, + "15000000": 2, + "150000000": 2, + "16000000": 2, + "17000000": 2, + "18000000": 2, + "180000000": 2, + "19000000": 2, + "2000000": 1, + "20000000": 2, + "21000000": 2, + "210000000": 2, + "22000000": 2, + "23000000": 2, + "24000000": 2, + "240000000": 2, "250000": 0, - "25000000": 3, - "26000000": 3, - "27000000": 3, - "270000000": 3, - "28000000": 3, - "29000000": 3, - "3000000": 3, - "30000000": 3, - "300000000": 3, - "31000000": 3, - "32000000": 3, - "33000000": 3, - "34000000": 3, - "35000000": 3, - "36000000": 3, - "37000000": 3, - "38000000": 3, - "39000000": 3, - "4000000": 3, - "40000000": 3, - "41000000": 3, - "42000000": 3, - "43000000": 3, - "44000000": 3, - "45000000": 3, - "46000000": 3, - "47000000": 3, - "48000000": 3, - "49000000": 3, + "25000000": 2, + "26000000": 2, + "27000000": 2, + "270000000": 2, + "28000000": 2, + "29000000": 2, + "3000000": 1, + "30000000": 2, + "300000000": 2, + "31000000": 2, + "32000000": 2, + "33000000": 2, + "34000000": 2, 
+ "35000000": 2, + "36000000": 2, + "37000000": 2, + "38000000": 2, + "39000000": 2, + "4000000": 1, + "40000000": 2, + "41000000": 2, + "42000000": 2, + "43000000": 2, + "44000000": 2, + "45000000": 2, + "46000000": 2, + "47000000": 2, + "48000000": 2, + "49000000": 2, "500000": 1, - "5000000": 3, - "50000000": 3, - "51000000": 3, - "52000000": 3, - "53000000": 3, - "54000000": 3, - "55000000": 3, - "56000000": 3, - "57000000": 3, - "58000000": 3, - "59000000": 3, - "6000000": 3, - "60000000": 3, - "65000000": 3, - "7000000": 3, - "70000000": 3, - "75000000": 3, - "8000000": 3, - "80000000": 3, - "85000000": 3, - "9000000": 3, - "90000000": 3, - "95000000": 3 + "5000000": 2, + "50000000": 2, + "51000000": 2, + "52000000": 2, + "53000000": 2, + "54000000": 2, + "55000000": 2, + "56000000": 2, + "57000000": 2, + "58000000": 2, + "59000000": 2, + "6000000": 2, + "60000000": 2, + "65000000": 2, + "7000000": 2, + "70000000": 2, + "75000000": 2, + "8000000": 2, + "80000000": 2, + "85000000": 2, + "9000000": 2, + "90000000": 2, + "95000000": 2 }, - "count": 3, - "sum": 3693176.4020000002 + "count": 2, + "sum": 4633884.953 } } }, @@ -331,25 +270,25 @@ "duration": { "us": { "bucket": { - "+Inf": 6, + "+Inf": 4, "1000": 0, - "1024000": 6, - "128000": 5, - "16000": 3, - "16384000": 6, + "1024000": 4, + "128000": 4, + "16000": 0, + "16384000": 4, "2000": 0, - "2048000": 6, - "256000": 6, - "32000": 4, + "2048000": 4, + "256000": 4, + "32000": 1, "4000": 0, - "4096000": 6, - "512000": 6, - "64000": 4, + "4096000": 4, + "512000": 4, + "64000": 2, "8000": 0, - "8192000": 6 + "8192000": 4 }, - "count": 6, - "sum": 349493.379 + "count": 4, + "sum": 297575.15099999995 } } } @@ -375,12 +314,73 @@ "proxy": { "client": { "request": { - "count": 7 + "duration": { + "us": { + "bucket": { + "+Inf": 4, + "100000": 4, + "1000000": 4, + "15000000": 4, + "2000000": 4, + "25000": 4, + "250000": 4, + "30000000": 4, + "4000000": 4, + "5000": 3, + "500000": 4, + "60000000": 4, + "8000000": 4 + 
}, + "count": 4, + "sum": 25656.880000000005 + } + }, + "size": { + "bytes": { + "bucket": { + "+Inf": 4, + "1024": 4, + "1048576": 4, + "16384": 4, + "16777216": 4, + "256": 4, + "262144": 4, + "4096": 4, + "4194304": 4, + "512": 4, + "64": 4, + "65536": 4 + }, + "count": 4, + "sum": 0 + } + } + }, + "response": { + "size": { + "bytes": { + "bucket": { + "+Inf": 4, + "1024": 0, + "1048576": 4, + "16384": 4, + "16777216": 4, + "256": 0, + "262144": 4, + "4096": 4, + "4194304": 4, + "512": 0, + "64": 0, + "65536": 4 + }, + "count": 4, + "sum": 10814 + } + } } }, - "code": "200", "host": "kind-control-plane:6443", - "method": "GET" + "verb": "GET" } }, "metricset": { diff --git a/metricbeat/module/kubernetes/proxy/proxy_test.go b/metricbeat/module/kubernetes/proxy/proxy_test.go index cab5a6fe4ed0..593ed555fec6 100644 --- a/metricbeat/module/kubernetes/proxy/proxy_test.go +++ b/metricbeat/module/kubernetes/proxy/proxy_test.go @@ -33,6 +33,7 @@ var files = []string{ "./_meta/test/metrics.1.26", "./_meta/test/metrics.1.27", "./_meta/test/metrics.1.28", + "./_meta/test/metrics.1.29", } func TestEventMapping(t *testing.T) { diff --git a/metricbeat/module/kubernetes/scheduler/_meta/test/metrics.1.29 b/metricbeat/module/kubernetes/scheduler/_meta/test/metrics.1.29 new file mode 100644 index 000000000000..dd5700fee34a --- /dev/null +++ b/metricbeat/module/kubernetes/scheduler/_meta/test/metrics.1.29 @@ -0,0 +1,1826 @@ +# HELP aggregator_discovery_aggregation_count_total [ALPHA] Counter of number of times discovery was aggregated +# TYPE aggregator_discovery_aggregation_count_total counter +aggregator_discovery_aggregation_count_total 0 +# HELP apiserver_audit_event_total [ALPHA] Counter of audit events generated and sent to the audit backend. +# TYPE apiserver_audit_event_total counter +apiserver_audit_event_total 0 +# HELP apiserver_audit_requests_rejected_total [ALPHA] Counter of apiserver requests rejected due to an error in audit logging backend. 
+# TYPE apiserver_audit_requests_rejected_total counter +apiserver_audit_requests_rejected_total 0 +# HELP apiserver_client_certificate_expiration_seconds [ALPHA] Distribution of the remaining lifetime on the certificate used to authenticate a request. +# TYPE apiserver_client_certificate_expiration_seconds histogram +apiserver_client_certificate_expiration_seconds_bucket{le="0"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="1800"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="3600"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="7200"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="21600"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="43200"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="86400"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="172800"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="345600"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="604800"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="2.592e+06"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="7.776e+06"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="1.5552e+07"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="3.1104e+07"} 0 +apiserver_client_certificate_expiration_seconds_bucket{le="+Inf"} 0 +apiserver_client_certificate_expiration_seconds_sum 0 +apiserver_client_certificate_expiration_seconds_count 0 +# HELP apiserver_delegated_authn_request_duration_seconds [ALPHA] Request latency in seconds. Broken down by status code. 
+# TYPE apiserver_delegated_authn_request_duration_seconds histogram +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="0.25"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="0.5"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="0.7"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="1"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="1.5"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="3"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="5"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="10"} 1 +apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="+Inf"} 1 +apiserver_delegated_authn_request_duration_seconds_sum{code="201"} 0.004166145 +apiserver_delegated_authn_request_duration_seconds_count{code="201"} 1 +# HELP apiserver_delegated_authn_request_total [ALPHA] Number of HTTP requests partitioned by status code. +# TYPE apiserver_delegated_authn_request_total counter +apiserver_delegated_authn_request_total{code="201"} 1 +# HELP apiserver_delegated_authz_request_duration_seconds [ALPHA] Request latency in seconds. Broken down by status code. 
+# TYPE apiserver_delegated_authz_request_duration_seconds histogram +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="0.25"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="0.5"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="0.7"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="1"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="1.5"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="3"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="5"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="10"} 1 +apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="+Inf"} 1 +apiserver_delegated_authz_request_duration_seconds_sum{code="201"} 0.003826062 +apiserver_delegated_authz_request_duration_seconds_count{code="201"} 1 +# HELP apiserver_delegated_authz_request_total [ALPHA] Number of HTTP requests partitioned by status code. +# TYPE apiserver_delegated_authz_request_total counter +apiserver_delegated_authz_request_total{code="201"} 1 +# HELP apiserver_envelope_encryption_dek_cache_fill_percent [ALPHA] Percent of the cache slots currently occupied by cached DEKs. +# TYPE apiserver_envelope_encryption_dek_cache_fill_percent gauge +apiserver_envelope_encryption_dek_cache_fill_percent 0 +# HELP apiserver_storage_data_key_generation_duration_seconds [ALPHA] Latencies in seconds of data encryption key(DEK) generation operations. 
+# TYPE apiserver_storage_data_key_generation_duration_seconds histogram +apiserver_storage_data_key_generation_duration_seconds_bucket{le="5e-06"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="1e-05"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="2e-05"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="4e-05"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="8e-05"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00016"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00032"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00064"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00128"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00256"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.00512"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.01024"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.02048"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="0.04096"} 0 +apiserver_storage_data_key_generation_duration_seconds_bucket{le="+Inf"} 0 +apiserver_storage_data_key_generation_duration_seconds_sum 0 +apiserver_storage_data_key_generation_duration_seconds_count 0 +# HELP apiserver_storage_data_key_generation_failures_total [ALPHA] Total number of failed data encryption key(DEK) generation operations. +# TYPE apiserver_storage_data_key_generation_failures_total counter +apiserver_storage_data_key_generation_failures_total 0 +# HELP apiserver_storage_envelope_transformation_cache_misses_total [ALPHA] Total number of cache misses while accessing key decryption key(KEK). 
+# TYPE apiserver_storage_envelope_transformation_cache_misses_total counter +apiserver_storage_envelope_transformation_cache_misses_total 0 +# HELP apiserver_webhooks_x509_insecure_sha1_total [ALPHA] Counts the number of requests to servers with insecure SHA1 signatures in their serving certificate OR the number of connection failures due to the insecure SHA1 signatures (either/or, based on the runtime environment) +# TYPE apiserver_webhooks_x509_insecure_sha1_total counter +apiserver_webhooks_x509_insecure_sha1_total 0 +# HELP apiserver_webhooks_x509_missing_san_total [ALPHA] Counts the number of requests to servers missing SAN extension in their serving certificate OR the number of connection failures due to the lack of x509 certificate SAN extension missing (either/or, based on the runtime environment) +# TYPE apiserver_webhooks_x509_missing_san_total counter +apiserver_webhooks_x509_missing_san_total 0 +# HELP authenticated_user_requests [ALPHA] Counter of authenticated requests broken out by username. +# TYPE authenticated_user_requests counter +authenticated_user_requests{username="other"} 243 +# HELP authentication_attempts [ALPHA] Counter of authenticated attempts. +# TYPE authentication_attempts counter +authentication_attempts{result="success"} 243 +# HELP authentication_duration_seconds [ALPHA] Authentication duration in seconds broken out by result. 
+# TYPE authentication_duration_seconds histogram +authentication_duration_seconds_bucket{result="success",le="0.001"} 243 +authentication_duration_seconds_bucket{result="success",le="0.002"} 243 +authentication_duration_seconds_bucket{result="success",le="0.004"} 243 +authentication_duration_seconds_bucket{result="success",le="0.008"} 243 +authentication_duration_seconds_bucket{result="success",le="0.016"} 243 +authentication_duration_seconds_bucket{result="success",le="0.032"} 243 +authentication_duration_seconds_bucket{result="success",le="0.064"} 243 +authentication_duration_seconds_bucket{result="success",le="0.128"} 243 +authentication_duration_seconds_bucket{result="success",le="0.256"} 243 +authentication_duration_seconds_bucket{result="success",le="0.512"} 243 +authentication_duration_seconds_bucket{result="success",le="1.024"} 243 +authentication_duration_seconds_bucket{result="success",le="2.048"} 243 +authentication_duration_seconds_bucket{result="success",le="4.096"} 243 +authentication_duration_seconds_bucket{result="success",le="8.192"} 243 +authentication_duration_seconds_bucket{result="success",le="16.384"} 243 +authentication_duration_seconds_bucket{result="success",le="+Inf"} 243 +authentication_duration_seconds_sum{result="success"} 0.008371243 +authentication_duration_seconds_count{result="success"} 243 +# HELP authentication_token_cache_active_fetch_count [ALPHA] +# TYPE authentication_token_cache_active_fetch_count gauge +authentication_token_cache_active_fetch_count{status="blocked"} 0 +authentication_token_cache_active_fetch_count{status="in_flight"} 0 +# HELP authentication_token_cache_fetch_total [ALPHA] +# TYPE authentication_token_cache_fetch_total counter +authentication_token_cache_fetch_total{status="ok"} 1 +# HELP authentication_token_cache_request_duration_seconds [ALPHA] +# TYPE authentication_token_cache_request_duration_seconds histogram +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.005"} 1 
+authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.01"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.025"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.05"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.1"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.25"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.5"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="1"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="2.5"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="5"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="10"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="+Inf"} 1 +authentication_token_cache_request_duration_seconds_sum{status="miss"} 0.004 +authentication_token_cache_request_duration_seconds_count{status="miss"} 1 +# HELP authentication_token_cache_request_total [ALPHA] +# TYPE authentication_token_cache_request_total counter +authentication_token_cache_request_total{status="miss"} 1 +# HELP authorization_attempts_total [ALPHA] Counter of authorization attempts broken down by result. It can be either 'allowed', 'denied', 'no-opinion' or 'error'. +# TYPE authorization_attempts_total counter +authorization_attempts_total{result="allowed"} 243 +# HELP authorization_duration_seconds [ALPHA] Authorization duration in seconds broken out by result. 
+# TYPE authorization_duration_seconds histogram +authorization_duration_seconds_bucket{result="allowed",le="0.001"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.002"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.004"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.008"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.016"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.032"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.064"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.128"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.256"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.512"} 243 +authorization_duration_seconds_bucket{result="allowed",le="1.024"} 243 +authorization_duration_seconds_bucket{result="allowed",le="2.048"} 243 +authorization_duration_seconds_bucket{result="allowed",le="4.096"} 243 +authorization_duration_seconds_bucket{result="allowed",le="8.192"} 243 +authorization_duration_seconds_bucket{result="allowed",le="16.384"} 243 +authorization_duration_seconds_bucket{result="allowed",le="+Inf"} 243 +authorization_duration_seconds_sum{result="allowed"} 0.0018631259999999992 +authorization_duration_seconds_count{result="allowed"} 243 +# HELP cardinality_enforcement_unexpected_categorizations_total [ALPHA] The count of unexpected categorizations during cardinality enforcement. +# TYPE cardinality_enforcement_unexpected_categorizations_total counter +cardinality_enforcement_unexpected_categorizations_total 0 +# HELP disabled_metrics_total [BETA] The count of disabled metrics. +# TYPE disabled_metrics_total counter +disabled_metrics_total 0 +# HELP go_cgo_go_to_c_calls_calls_total Count of calls made from Go to C by the current process. 
+# TYPE go_cgo_go_to_c_calls_calls_total counter +go_cgo_go_to_c_calls_calls_total 0 +# HELP go_cpu_classes_gc_mark_assist_cpu_seconds_total Estimated total CPU time goroutines spent performing GC tasks to assist the GC and prevent it from falling behind the application. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_gc_mark_assist_cpu_seconds_total counter +go_cpu_classes_gc_mark_assist_cpu_seconds_total 0.011744296 +# HELP go_cpu_classes_gc_mark_dedicated_cpu_seconds_total Estimated total CPU time spent performing GC tasks on processors (as defined by GOMAXPROCS) dedicated to those tasks. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_gc_mark_dedicated_cpu_seconds_total counter +go_cpu_classes_gc_mark_dedicated_cpu_seconds_total 0.283499396 +# HELP go_cpu_classes_gc_mark_idle_cpu_seconds_total Estimated total CPU time spent performing GC tasks on spare CPU resources that the Go scheduler could not otherwise find a use for. This should be subtracted from the total GC CPU time to obtain a measure of compulsory GC CPU time. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_gc_mark_idle_cpu_seconds_total counter +go_cpu_classes_gc_mark_idle_cpu_seconds_total 0.030548669 +# HELP go_cpu_classes_gc_pause_cpu_seconds_total Estimated total CPU time spent with the application paused by the GC. Even if only one thread is running during the pause, this is computed as GOMAXPROCS times the pause latency because nothing else can be executing. This is the exact sum of samples in /gc/pause:seconds if each sample is multiplied by GOMAXPROCS at the time it is taken. 
This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_gc_pause_cpu_seconds_total counter +go_cpu_classes_gc_pause_cpu_seconds_total 0.093005136 +# HELP go_cpu_classes_gc_total_cpu_seconds_total Estimated total CPU time spent performing GC tasks. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. Sum of all metrics in /cpu/classes/gc. +# TYPE go_cpu_classes_gc_total_cpu_seconds_total counter +go_cpu_classes_gc_total_cpu_seconds_total 0.418797497 +# HELP go_cpu_classes_idle_cpu_seconds_total Estimated total available CPU time not spent executing any Go or Go runtime code. In other words, the part of /cpu/classes/total:cpu-seconds that was unused. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_idle_cpu_seconds_total counter +go_cpu_classes_idle_cpu_seconds_total 37095.656235078 +# HELP go_cpu_classes_scavenge_assist_cpu_seconds_total Estimated total CPU time spent returning unused memory to the underlying platform in response eagerly in response to memory pressure. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_scavenge_assist_cpu_seconds_total counter +go_cpu_classes_scavenge_assist_cpu_seconds_total 3.86e-07 +# HELP go_cpu_classes_scavenge_background_cpu_seconds_total Estimated total CPU time spent performing background tasks to return unused memory to the underlying platform. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. 
+# TYPE go_cpu_classes_scavenge_background_cpu_seconds_total counter +go_cpu_classes_scavenge_background_cpu_seconds_total 0.000335459 +# HELP go_cpu_classes_scavenge_total_cpu_seconds_total Estimated total CPU time spent performing tasks that return unused memory to the underlying platform. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. Sum of all metrics in /cpu/classes/scavenge. +# TYPE go_cpu_classes_scavenge_total_cpu_seconds_total counter +go_cpu_classes_scavenge_total_cpu_seconds_total 0.000335845 +# HELP go_cpu_classes_total_cpu_seconds_total Estimated total available CPU time for user Go code or the Go runtime, as defined by GOMAXPROCS. In other words, GOMAXPROCS integrated over the wall-clock duration this process has been executing for. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. Sum of all metrics in /cpu/classes. +# TYPE go_cpu_classes_total_cpu_seconds_total counter +go_cpu_classes_total_cpu_seconds_total 37106.78391232 +# HELP go_cpu_classes_user_cpu_seconds_total Estimated total CPU time spent running user Go code. This may also include some small amount of time spent in the Go runtime. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +# TYPE go_cpu_classes_user_cpu_seconds_total counter +go_cpu_classes_user_cpu_seconds_total 10.7085439 +# HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime. +# TYPE go_gc_cycles_automatic_gc_cycles_total counter +go_gc_cycles_automatic_gc_cycles_total 29 +# HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application. 
+# TYPE go_gc_cycles_forced_gc_cycles_total counter +go_gc_cycles_forced_gc_cycles_total 0 +# HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles. +# TYPE go_gc_cycles_total_gc_cycles_total counter +go_gc_cycles_total_gc_cycles_total 29 +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 7.2067e-05 +go_gc_duration_seconds{quantile="0.25"} 8.6777e-05 +go_gc_duration_seconds{quantile="0.5"} 0.000115853 +go_gc_duration_seconds{quantile="0.75"} 0.000222016 +go_gc_duration_seconds{quantile="1"} 0.000744815 +go_gc_duration_seconds_sum 0.005812821 +go_gc_duration_seconds_count 29 +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. +# TYPE go_gc_gogc_percent gauge +go_gc_gogc_percent 100 +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. +# TYPE go_gc_gomemlimit_bytes gauge +go_gc_gomemlimit_bytes 9.223372036854776e+18 +# HELP go_gc_heap_allocs_by_size_bytes Distribution of heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
+# TYPE go_gc_heap_allocs_by_size_bytes histogram +go_gc_heap_allocs_by_size_bytes_bucket{le="8.999999999999998"} 11502 +go_gc_heap_allocs_by_size_bytes_bucket{le="24.999999999999996"} 337627 +go_gc_heap_allocs_by_size_bytes_bucket{le="64.99999999999999"} 668021 +go_gc_heap_allocs_by_size_bytes_bucket{le="144.99999999999997"} 912014 +go_gc_heap_allocs_by_size_bytes_bucket{le="320.99999999999994"} 986135 +go_gc_heap_allocs_by_size_bytes_bucket{le="704.9999999999999"} 1.011816e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="1536.9999999999998"} 1.016066e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="3200.9999999999995"} 1.017619e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="6528.999999999999"} 1.018929e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="13568.999999999998"} 1.019088e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="27264.999999999996"} 1.019141e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="+Inf"} 1.019191e+06 +go_gc_heap_allocs_by_size_bytes_sum 9.52492e+07 +go_gc_heap_allocs_by_size_bytes_count 1.019191e+06 +# HELP go_gc_heap_allocs_bytes_total Cumulative sum of memory allocated to the heap by the application. +# TYPE go_gc_heap_allocs_bytes_total counter +go_gc_heap_allocs_bytes_total 9.52492e+07 +# HELP go_gc_heap_allocs_objects_total Cumulative count of heap allocations triggered by the application. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. +# TYPE go_gc_heap_allocs_objects_total counter +go_gc_heap_allocs_objects_total 1.019191e+06 +# HELP go_gc_heap_frees_by_size_bytes Distribution of freed heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
+# TYPE go_gc_heap_frees_by_size_bytes histogram +go_gc_heap_frees_by_size_bytes_bucket{le="8.999999999999998"} 8827 +go_gc_heap_frees_by_size_bytes_bucket{le="24.999999999999996"} 309924 +go_gc_heap_frees_by_size_bytes_bucket{le="64.99999999999999"} 616835 +go_gc_heap_frees_by_size_bytes_bucket{le="144.99999999999997"} 845511 +go_gc_heap_frees_by_size_bytes_bucket{le="320.99999999999994"} 915369 +go_gc_heap_frees_by_size_bytes_bucket{le="704.9999999999999"} 939023 +go_gc_heap_frees_by_size_bytes_bucket{le="1536.9999999999998"} 942831 +go_gc_heap_frees_by_size_bytes_bucket{le="3200.9999999999995"} 944135 +go_gc_heap_frees_by_size_bytes_bucket{le="6528.999999999999"} 945346 +go_gc_heap_frees_by_size_bytes_bucket{le="13568.999999999998"} 945432 +go_gc_heap_frees_by_size_bytes_bucket{le="27264.999999999996"} 945449 +go_gc_heap_frees_by_size_bytes_bucket{le="+Inf"} 945485 +go_gc_heap_frees_by_size_bytes_sum 8.6091296e+07 +go_gc_heap_frees_by_size_bytes_count 945485 +# HELP go_gc_heap_frees_bytes_total Cumulative sum of heap memory freed by the garbage collector. +# TYPE go_gc_heap_frees_bytes_total counter +go_gc_heap_frees_bytes_total 8.6091296e+07 +# HELP go_gc_heap_frees_objects_total Cumulative count of heap allocations whose storage was freed by the garbage collector. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. +# TYPE go_gc_heap_frees_objects_total counter +go_gc_heap_frees_objects_total 945485 +# HELP go_gc_heap_goal_bytes Heap size target for the end of the GC cycle. +# TYPE go_gc_heap_goal_bytes gauge +go_gc_heap_goal_bytes 1.3477768e+07 +# HELP go_gc_heap_live_bytes Heap memory occupied by live objects that were marked by the previous GC. +# TYPE go_gc_heap_live_bytes gauge +go_gc_heap_live_bytes 6.428656e+06 +# HELP go_gc_heap_objects_objects Number of objects, live or unswept, occupying heap memory. 
+# TYPE go_gc_heap_objects_objects gauge +go_gc_heap_objects_objects 73706 +# HELP go_gc_heap_tiny_allocs_objects_total Count of small allocations that are packed together into blocks. These allocations are counted separately from other allocations because each individual allocation is not tracked by the runtime, only their block. Each block is already accounted for in allocs-by-size and frees-by-size. +# TYPE go_gc_heap_tiny_allocs_objects_total counter +go_gc_heap_tiny_allocs_objects_total 94953 +# HELP go_gc_limiter_last_enabled_gc_cycle GC cycle the last time the GC CPU limiter was enabled. This metric is useful for diagnosing the root cause of an out-of-memory error, because the limiter trades memory for CPU time when the GC's CPU time gets too high. This is most likely to occur with use of SetMemoryLimit. The first GC cycle is cycle 1, so a value of 0 indicates that it was never enabled. +# TYPE go_gc_limiter_last_enabled_gc_cycle gauge +go_gc_limiter_last_enabled_gc_cycle 0 +# HELP go_gc_pauses_seconds Distribution of individual GC-related stop-the-world pause latencies. Bucket counts increase monotonically. +# TYPE go_gc_pauses_seconds histogram +go_gc_pauses_seconds_bucket{le="6.399999999999999e-08"} 0 +go_gc_pauses_seconds_bucket{le="6.399999999999999e-07"} 0 +go_gc_pauses_seconds_bucket{le="7.167999999999999e-06"} 1 +go_gc_pauses_seconds_bucket{le="8.191999999999999e-05"} 40 +go_gc_pauses_seconds_bucket{le="0.0009175039999999999"} 58 +go_gc_pauses_seconds_bucket{le="0.010485759999999998"} 58 +go_gc_pauses_seconds_bucket{le="0.11744051199999998"} 58 +go_gc_pauses_seconds_bucket{le="+Inf"} 58 +go_gc_pauses_seconds_sum 0.001754752 +go_gc_pauses_seconds_count 58 +# HELP go_gc_scan_globals_bytes The total amount of global variable space that is scannable. +# TYPE go_gc_scan_globals_bytes gauge +go_gc_scan_globals_bytes 453768 +# HELP go_gc_scan_heap_bytes The total amount of heap space that is scannable. 
+# TYPE go_gc_scan_heap_bytes gauge +go_gc_scan_heap_bytes 6.264384e+06 +# HELP go_gc_scan_stack_bytes The number of bytes of stack that were scanned last GC cycle. +# TYPE go_gc_scan_stack_bytes gauge +go_gc_scan_stack_bytes 166688 +# HELP go_gc_scan_total_bytes The total amount space that is scannable. Sum of all metrics in /gc/scan. +# TYPE go_gc_scan_total_bytes gauge +go_gc_scan_total_bytes 6.88484e+06 +# HELP go_gc_stack_starting_size_bytes The stack size of new goroutines. +# TYPE go_gc_stack_starting_size_bytes gauge +go_gc_stack_starting_size_bytes 2048 +# HELP go_godebug_non_default_behavior_execerrdot_events_total The number of non-default behaviors executed by the os/exec package due to a non-default GODEBUG=execerrdot=... setting. +# TYPE go_godebug_non_default_behavior_execerrdot_events_total counter +go_godebug_non_default_behavior_execerrdot_events_total 0 +# HELP go_godebug_non_default_behavior_gocachehash_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocachehash=... setting. +# TYPE go_godebug_non_default_behavior_gocachehash_events_total counter +go_godebug_non_default_behavior_gocachehash_events_total 0 +# HELP go_godebug_non_default_behavior_gocachetest_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocachetest=... setting. +# TYPE go_godebug_non_default_behavior_gocachetest_events_total counter +go_godebug_non_default_behavior_gocachetest_events_total 0 +# HELP go_godebug_non_default_behavior_gocacheverify_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocacheverify=... setting. 
+# TYPE go_godebug_non_default_behavior_gocacheverify_events_total counter +go_godebug_non_default_behavior_gocacheverify_events_total 0 +# HELP go_godebug_non_default_behavior_http2client_events_total The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2client=... setting. +# TYPE go_godebug_non_default_behavior_http2client_events_total counter +go_godebug_non_default_behavior_http2client_events_total 0 +# HELP go_godebug_non_default_behavior_http2server_events_total The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2server=... setting. +# TYPE go_godebug_non_default_behavior_http2server_events_total counter +go_godebug_non_default_behavior_http2server_events_total 0 +# HELP go_godebug_non_default_behavior_installgoroot_events_total The number of non-default behaviors executed by the go/build package due to a non-default GODEBUG=installgoroot=... setting. +# TYPE go_godebug_non_default_behavior_installgoroot_events_total counter +go_godebug_non_default_behavior_installgoroot_events_total 0 +# HELP go_godebug_non_default_behavior_jstmpllitinterp_events_total The number of non-default behaviors executed by the html/template package due to a non-default GODEBUG=jstmpllitinterp=... setting. +# TYPE go_godebug_non_default_behavior_jstmpllitinterp_events_total counter +go_godebug_non_default_behavior_jstmpllitinterp_events_total 0 +# HELP go_godebug_non_default_behavior_multipartmaxheaders_events_total The number of non-default behaviors executed by the mime/multipart package due to a non-default GODEBUG=multipartmaxheaders=... setting. 
+# TYPE go_godebug_non_default_behavior_multipartmaxheaders_events_total counter +go_godebug_non_default_behavior_multipartmaxheaders_events_total 0 +# HELP go_godebug_non_default_behavior_multipartmaxparts_events_total The number of non-default behaviors executed by the mime/multipart package due to a non-default GODEBUG=multipartmaxparts=... setting. +# TYPE go_godebug_non_default_behavior_multipartmaxparts_events_total counter +go_godebug_non_default_behavior_multipartmaxparts_events_total 0 +# HELP go_godebug_non_default_behavior_multipathtcp_events_total The number of non-default behaviors executed by the net package due to a non-default GODEBUG=multipathtcp=... setting. +# TYPE go_godebug_non_default_behavior_multipathtcp_events_total counter +go_godebug_non_default_behavior_multipathtcp_events_total 0 +# HELP go_godebug_non_default_behavior_panicnil_events_total The number of non-default behaviors executed by the runtime package due to a non-default GODEBUG=panicnil=... setting. +# TYPE go_godebug_non_default_behavior_panicnil_events_total counter +go_godebug_non_default_behavior_panicnil_events_total 0 +# HELP go_godebug_non_default_behavior_randautoseed_events_total The number of non-default behaviors executed by the math/rand package due to a non-default GODEBUG=randautoseed=... setting. +# TYPE go_godebug_non_default_behavior_randautoseed_events_total counter +go_godebug_non_default_behavior_randautoseed_events_total 0 +# HELP go_godebug_non_default_behavior_tarinsecurepath_events_total The number of non-default behaviors executed by the archive/tar package due to a non-default GODEBUG=tarinsecurepath=... setting. +# TYPE go_godebug_non_default_behavior_tarinsecurepath_events_total counter +go_godebug_non_default_behavior_tarinsecurepath_events_total 0 +# HELP go_godebug_non_default_behavior_tlsmaxrsasize_events_total The number of non-default behaviors executed by the crypto/tls package due to a non-default GODEBUG=tlsmaxrsasize=... setting. 
+# TYPE go_godebug_non_default_behavior_tlsmaxrsasize_events_total counter +go_godebug_non_default_behavior_tlsmaxrsasize_events_total 0 +# HELP go_godebug_non_default_behavior_x509sha1_events_total The number of non-default behaviors executed by the crypto/x509 package due to a non-default GODEBUG=x509sha1=... setting. +# TYPE go_godebug_non_default_behavior_x509sha1_events_total counter +go_godebug_non_default_behavior_x509sha1_events_total 0 +# HELP go_godebug_non_default_behavior_x509usefallbackroots_events_total The number of non-default behaviors executed by the crypto/x509 package due to a non-default GODEBUG=x509usefallbackroots=... setting. +# TYPE go_godebug_non_default_behavior_x509usefallbackroots_events_total counter +go_godebug_non_default_behavior_x509usefallbackroots_events_total 0 +# HELP go_godebug_non_default_behavior_zipinsecurepath_events_total The number of non-default behaviors executed by the archive/zip package due to a non-default GODEBUG=zipinsecurepath=... setting. +# TYPE go_godebug_non_default_behavior_zipinsecurepath_events_total counter +go_godebug_non_default_behavior_zipinsecurepath_events_total 0 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 172 +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{version="go1.21.5"} 1 +# HELP go_memory_classes_heap_free_bytes Memory that is completely free and eligible to be returned to the underlying system, but has not been. This metric is the runtime's estimate of free address space that is backed by physical memory. +# TYPE go_memory_classes_heap_free_bytes gauge +go_memory_classes_heap_free_bytes 1.081344e+06 +# HELP go_memory_classes_heap_objects_bytes Memory occupied by live objects and dead objects that have not yet been marked free by the garbage collector. 
+# TYPE go_memory_classes_heap_objects_bytes gauge +go_memory_classes_heap_objects_bytes 9.157904e+06 +# HELP go_memory_classes_heap_released_bytes Memory that is completely free and has been returned to the underlying system. This metric is the runtime's estimate of free address space that is still mapped into the process, but is not backed by physical memory. +# TYPE go_memory_classes_heap_released_bytes gauge +go_memory_classes_heap_released_bytes 1.179648e+06 +# HELP go_memory_classes_heap_stacks_bytes Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use. Currently, this represents all stack memory for goroutines. It also includes all OS thread stacks in non-cgo programs. Note that stacks may be allocated differently in the future, and this may change. +# TYPE go_memory_classes_heap_stacks_bytes gauge +go_memory_classes_heap_stacks_bytes 2.064384e+06 +# HELP go_memory_classes_heap_unused_bytes Memory that is reserved for heap objects but is not currently used to hold heap objects. +# TYPE go_memory_classes_heap_unused_bytes gauge +go_memory_classes_heap_unused_bytes 3.293936e+06 +# HELP go_memory_classes_metadata_mcache_free_bytes Memory that is reserved for runtime mcache structures, but not in-use. +# TYPE go_memory_classes_metadata_mcache_free_bytes gauge +go_memory_classes_metadata_mcache_free_bytes 12000 +# HELP go_memory_classes_metadata_mcache_inuse_bytes Memory that is occupied by runtime mcache structures that are currently being used. +# TYPE go_memory_classes_metadata_mcache_inuse_bytes gauge +go_memory_classes_metadata_mcache_inuse_bytes 19200 +# HELP go_memory_classes_metadata_mspan_free_bytes Memory that is reserved for runtime mspan structures, but not in-use. 
+# TYPE go_memory_classes_metadata_mspan_free_bytes gauge +go_memory_classes_metadata_mspan_free_bytes 18984 +# HELP go_memory_classes_metadata_mspan_inuse_bytes Memory that is occupied by runtime mspan structures that are currently being used. +# TYPE go_memory_classes_metadata_mspan_inuse_bytes gauge +go_memory_classes_metadata_mspan_inuse_bytes 339528 +# HELP go_memory_classes_metadata_other_bytes Memory that is reserved for or used to hold runtime metadata. +# TYPE go_memory_classes_metadata_other_bytes gauge +go_memory_classes_metadata_other_bytes 4.734312e+06 +# HELP go_memory_classes_os_stacks_bytes Stack memory allocated by the underlying operating system. In non-cgo programs this metric is currently zero. This may change in the future.In cgo programs this metric includes OS thread stacks allocated directly from the OS. Currently, this only accounts for one stack in c-shared and c-archive build modes, and other sources of stacks from the OS are not measured. This too may change in the future. +# TYPE go_memory_classes_os_stacks_bytes gauge +go_memory_classes_os_stacks_bytes 0 +# HELP go_memory_classes_other_bytes Memory used by execution trace buffers, structures for debugging the runtime, finalizer and profiler specials, and more. +# TYPE go_memory_classes_other_bytes gauge +go_memory_classes_other_bytes 2.379195e+06 +# HELP go_memory_classes_profiling_buckets_bytes Memory that is used by the stack trace hash map used for profiling. +# TYPE go_memory_classes_profiling_buckets_bytes gauge +go_memory_classes_profiling_buckets_bytes 1.513365e+06 +# HELP go_memory_classes_total_bytes All memory mapped by the Go runtime into the current process as read-write. Note that this does not include memory mapped by code called via cgo or via the syscall package. Sum of all metrics in /memory/classes. +# TYPE go_memory_classes_total_bytes gauge +go_memory_classes_total_bytes 2.57938e+07 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. 
+# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 9.157904e+06 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 9.52492e+07 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.513365e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 1.040438e+06 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 4.734312e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 9.157904e+06 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 2.260992e+06 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 1.245184e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 73706 +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes gauge +go_memstats_heap_released_bytes 1.179648e+06 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 1.4712832e+07 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.7048970868727124e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. 
+# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 0 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 1.114144e+06 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 19200 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 31200 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 339528 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 358512 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 1.3477768e+07 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 2.379195e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 2.064384e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 2.064384e+06 +# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 2.57938e+07 +# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. 
+# TYPE go_sched_gomaxprocs_threads gauge +go_sched_gomaxprocs_threads 16 +# HELP go_sched_goroutines_goroutines Count of live goroutines. +# TYPE go_sched_goroutines_goroutines gauge +go_sched_goroutines_goroutines 172 +# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. Bucket counts increase monotonically. +# TYPE go_sched_latencies_seconds histogram +go_sched_latencies_seconds_bucket{le="6.399999999999999e-08"} 3704 +go_sched_latencies_seconds_bucket{le="6.399999999999999e-07"} 3873 +go_sched_latencies_seconds_bucket{le="7.167999999999999e-06"} 6018 +go_sched_latencies_seconds_bucket{le="8.191999999999999e-05"} 11585 +go_sched_latencies_seconds_bucket{le="0.0009175039999999999"} 12286 +go_sched_latencies_seconds_bucket{le="0.010485759999999998"} 12292 +go_sched_latencies_seconds_bucket{le="0.11744051199999998"} 12292 +go_sched_latencies_seconds_bucket{le="+Inf"} 12292 +go_sched_latencies_seconds_sum 0.10421881599999999 +go_sched_latencies_seconds_count 12292 +# HELP go_sync_mutex_wait_total_seconds_total Approximate cumulative time goroutines have spent blocked on a sync.Mutex or sync.RWMutex. This metric is useful for identifying global changes in lock contention. Collect a mutex or block profile using the runtime/pprof package for more detailed contention data. +# TYPE go_sync_mutex_wait_total_seconds_total counter +go_sync_mutex_wait_total_seconds_total 0 +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +go_threads 19 +# HELP hidden_metrics_total [BETA] The count of hidden metrics. +# TYPE hidden_metrics_total counter +hidden_metrics_total 1 +# HELP kubernetes_build_info [ALPHA] A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running. 
+# TYPE kubernetes_build_info gauge +kubernetes_build_info{build_date="2023-12-14T19:18:17Z",compiler="gc",git_commit="3f7a50f38688eb332e2a1b013678c6435d539ae6",git_tree_state="clean",git_version="v1.29.0",go_version="go1.21.5",major="1",minor="29",platform="linux/amd64"} 1 +# HELP kubernetes_feature_enabled [BETA] This metric records the data about the stage and enablement of a k8s feature. +# TYPE kubernetes_feature_enabled gauge +kubernetes_feature_enabled{name="APIListChunking",stage=""} 1 +kubernetes_feature_enabled{name="APIPriorityAndFairness",stage=""} 1 +kubernetes_feature_enabled{name="APIResponseCompression",stage="BETA"} 1 +kubernetes_feature_enabled{name="APISelfSubjectReview",stage=""} 1 +kubernetes_feature_enabled{name="APIServerIdentity",stage="BETA"} 1 +kubernetes_feature_enabled{name="APIServerTracing",stage="BETA"} 1 +kubernetes_feature_enabled{name="AdmissionWebhookMatchConditions",stage="BETA"} 1 +kubernetes_feature_enabled{name="AggregatedDiscoveryEndpoint",stage="BETA"} 1 +kubernetes_feature_enabled{name="AllAlpha",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="AllBeta",stage="BETA"} 0 +kubernetes_feature_enabled{name="AllowServiceLBStatusOnNonLB",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="AnyVolumeDataSource",stage="BETA"} 1 +kubernetes_feature_enabled{name="AppArmor",stage="BETA"} 1 +kubernetes_feature_enabled{name="CPUManager",stage=""} 1 +kubernetes_feature_enabled{name="CPUManagerPolicyAlphaOptions",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CPUManagerPolicyBetaOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="CPUManagerPolicyOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="CRDValidationRatcheting",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CSIMigrationAzureFile",stage=""} 1 +kubernetes_feature_enabled{name="CSIMigrationPortworx",stage="BETA"} 0 +kubernetes_feature_enabled{name="CSIMigrationRBD",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="CSINodeExpandSecret",stage=""} 1 
+kubernetes_feature_enabled{name="CSIVolumeHealth",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CloudControllerManagerWebhook",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CloudDualStackNodeIPs",stage="BETA"} 1 +kubernetes_feature_enabled{name="ClusterTrustBundle",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ClusterTrustBundleProjection",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ComponentSLIs",stage="BETA"} 1 +kubernetes_feature_enabled{name="ConsistentHTTPGetHandlers",stage=""} 1 +kubernetes_feature_enabled{name="ConsistentListFromCache",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ContainerCheckpoint",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ContextualLogging",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CronJobsScheduledAnnotation",stage="BETA"} 1 +kubernetes_feature_enabled{name="CrossNamespaceVolumeDataSource",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CustomCPUCFSQuotaPeriod",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CustomResourceValidationExpressions",stage=""} 1 +kubernetes_feature_enabled{name="DefaultHostNetworkHostPortsInPodTemplates",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="DevicePluginCDIDevices",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableCloudProviders",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableKubeletCloudCredentialProviders",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableNodeKubeProxyVersion",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="DynamicResourceAllocation",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="EfficientWatchResumption",stage=""} 1 +kubernetes_feature_enabled{name="ElasticIndexedJob",stage="BETA"} 1 +kubernetes_feature_enabled{name="EventedPLEG",stage="BETA"} 0 +kubernetes_feature_enabled{name="ExecProbeTimeout",stage=""} 1 +kubernetes_feature_enabled{name="ExpandedDNSConfig",stage=""} 1 +kubernetes_feature_enabled{name="ExperimentalHostUserNamespaceDefaulting",stage="DEPRECATED"} 0 
+kubernetes_feature_enabled{name="GracefulNodeShutdown",stage="BETA"} 1 +kubernetes_feature_enabled{name="GracefulNodeShutdownBasedOnPodPriority",stage="BETA"} 1 +kubernetes_feature_enabled{name="HPAContainerMetrics",stage="BETA"} 1 +kubernetes_feature_enabled{name="HPAScaleToZero",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="HonorPVReclaimPolicy",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="IPTablesOwnershipCleanup",stage=""} 1 +kubernetes_feature_enabled{name="ImageMaximumGCAge",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InPlacePodVerticalScaling",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginAWSUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginAzureDiskUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginAzureFileUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginGCEUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginOpenStackUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginPortworxUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="InTreePluginRBDUnregister",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="InTreePluginvSphereUnregister",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="JobBackoffLimitPerIndex",stage="BETA"} 1 +kubernetes_feature_enabled{name="JobPodFailurePolicy",stage="BETA"} 1 +kubernetes_feature_enabled{name="JobPodReplacementPolicy",stage="BETA"} 1 +kubernetes_feature_enabled{name="JobReadyPods",stage=""} 1 +kubernetes_feature_enabled{name="KMSv1",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="KMSv2",stage=""} 1 +kubernetes_feature_enabled{name="KMSv2KDF",stage=""} 1 +kubernetes_feature_enabled{name="KubeProxyDrainingTerminatingNodes",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="KubeletCgroupDriverFromCRI",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="KubeletInUserNamespace",stage="ALPHA"} 0 
+kubernetes_feature_enabled{name="KubeletPodResources",stage=""} 1 +kubernetes_feature_enabled{name="KubeletPodResourcesDynamicResources",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="KubeletPodResourcesGet",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="KubeletPodResourcesGetAllocatable",stage=""} 1 +kubernetes_feature_enabled{name="KubeletSeparateDiskGC",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="KubeletTracing",stage="BETA"} 1 +kubernetes_feature_enabled{name="LegacyServiceAccountTokenCleanUp",stage="BETA"} 1 +kubernetes_feature_enabled{name="LegacyServiceAccountTokenTracking",stage=""} 1 +kubernetes_feature_enabled{name="LoadBalancerIPMode",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="LocalStorageCapacityIsolationFSQuotaMonitoring",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="LogarithmicScaleDown",stage="BETA"} 1 +kubernetes_feature_enabled{name="LoggingAlphaOptions",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="LoggingBetaOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="MatchLabelKeysInPodAffinity",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="MatchLabelKeysInPodTopologySpread",stage="BETA"} 1 +kubernetes_feature_enabled{name="MaxUnavailableStatefulSet",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="MemoryManager",stage="BETA"} 1 +kubernetes_feature_enabled{name="MemoryQoS",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="MinDomainsInPodTopologySpread",stage="BETA"} 1 +kubernetes_feature_enabled{name="MinimizeIPTablesRestore",stage=""} 1 +kubernetes_feature_enabled{name="MultiCIDRServiceAllocator",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="NFTablesProxyMode",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="NewVolumeManagerReconstruction",stage="BETA"} 1 +kubernetes_feature_enabled{name="NodeInclusionPolicyInPodTopologySpread",stage="BETA"} 1 +kubernetes_feature_enabled{name="NodeLogQuery",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="NodeOutOfServiceVolumeDetach",stage=""} 1 
+kubernetes_feature_enabled{name="NodeSwap",stage="BETA"} 0 +kubernetes_feature_enabled{name="OpenAPIEnums",stage="BETA"} 1 +kubernetes_feature_enabled{name="PDBUnhealthyPodEvictionPolicy",stage="BETA"} 1 +kubernetes_feature_enabled{name="PersistentVolumeLastPhaseTransitionTime",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodAndContainerStatsFromCRI",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PodDeletionCost",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodDisruptionConditions",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodHostIPs",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodIndexLabel",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodLifecycleSleepAction",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PodReadyToStartContainersCondition",stage="BETA"} 1 +kubernetes_feature_enabled{name="PodSchedulingReadiness",stage="BETA"} 1 +kubernetes_feature_enabled{name="ProcMountType",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ProxyTerminatingEndpoints",stage=""} 1 +kubernetes_feature_enabled{name="QOSReserved",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ReadWriteOncePod",stage=""} 1 +kubernetes_feature_enabled{name="RecoverVolumeExpansionFailure",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="RemainingItemCount",stage=""} 1 +kubernetes_feature_enabled{name="RemoveSelfLink",stage=""} 1 +kubernetes_feature_enabled{name="RotateKubeletServerCertificate",stage="BETA"} 1 +kubernetes_feature_enabled{name="RuntimeClassInImageCriApi",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="SELinuxMountReadWriteOncePod",stage="BETA"} 1 +kubernetes_feature_enabled{name="SchedulerQueueingHints",stage="BETA"} 0 +kubernetes_feature_enabled{name="SecurityContextDeny",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="SeparateTaintEvictionController",stage="BETA"} 1 +kubernetes_feature_enabled{name="ServerSideApply",stage=""} 1 +kubernetes_feature_enabled{name="ServerSideFieldValidation",stage=""} 1 
+kubernetes_feature_enabled{name="ServiceAccountTokenJTI",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenNodeBinding",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenNodeBindingValidation",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenPodNodeInfo",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceNodePortStaticSubrange",stage=""} 1 +kubernetes_feature_enabled{name="SidecarContainers",stage="BETA"} 1 +kubernetes_feature_enabled{name="SizeMemoryBackedVolumes",stage="BETA"} 1 +kubernetes_feature_enabled{name="SkipReadOnlyValidationGCE",stage="DEPRECATED"} 1 +kubernetes_feature_enabled{name="StableLoadBalancerNodeSet",stage="BETA"} 1 +kubernetes_feature_enabled{name="StatefulSetAutoDeletePVC",stage="BETA"} 1 +kubernetes_feature_enabled{name="StatefulSetStartOrdinal",stage="BETA"} 1 +kubernetes_feature_enabled{name="StorageVersionAPI",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="StorageVersionHash",stage="BETA"} 1 +kubernetes_feature_enabled{name="StructuredAuthenticationConfiguration",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="StructuredAuthorizationConfiguration",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="TopologyAwareHints",stage="BETA"} 1 +kubernetes_feature_enabled{name="TopologyManagerPolicyAlphaOptions",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="TopologyManagerPolicyBetaOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="TopologyManagerPolicyOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="TranslateStreamCloseWebsocketRequests",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="UnauthenticatedHTTP2DOSMitigation",stage="BETA"} 1 +kubernetes_feature_enabled{name="UnknownVersionInteroperabilityProxy",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="UserNamespacesPodSecurityStandards",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="UserNamespacesSupport",stage="ALPHA"} 0 
+kubernetes_feature_enabled{name="ValidatingAdmissionPolicy",stage="BETA"} 0 +kubernetes_feature_enabled{name="VolumeAttributesClass",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="VolumeCapacityPriority",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="WatchBookmark",stage=""} 1 +kubernetes_feature_enabled{name="WatchList",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="WinDSR",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="WinOverlay",stage="BETA"} 1 +kubernetes_feature_enabled{name="WindowsHostNetwork",stage="ALPHA"} 1 +kubernetes_feature_enabled{name="ZeroLimitedNominalConcurrencyShares",stage="BETA"} 0 +# HELP leader_election_master_status [ALPHA] Gauge of if the reporting system is master of the relevant lease, 0 indicates backup, 1 indicates master. 'name' is the string used to identify the lease. Please make sure to group by name. +# TYPE leader_election_master_status gauge +leader_election_master_status{name="kube-scheduler"} 1 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 14.27 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 1.048576e+06 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 10 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 6.3909888e+07 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.70489476707e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 1.316384768e+09 +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. 
+# TYPE process_virtual_memory_max_bytes gauge +process_virtual_memory_max_bytes 1.8446744073709552e+19 +# HELP registered_metrics_total [BETA] The count of registered metrics broken by stability level and deprecation version. +# TYPE registered_metrics_total counter +registered_metrics_total{deprecated_version="",stability_level="ALPHA"} 83 +registered_metrics_total{deprecated_version="",stability_level="BETA"} 5 +registered_metrics_total{deprecated_version="",stability_level="STABLE"} 13 +# HELP rest_client_exec_plugin_certificate_rotation_age [ALPHA] Histogram of the number of seconds the last auth exec plugin client certificate lived before being rotated. If auth exec plugin client certificates are unused, histogram will contain no data. +# TYPE rest_client_exec_plugin_certificate_rotation_age histogram +rest_client_exec_plugin_certificate_rotation_age_bucket{le="600"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="1800"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="3600"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="14400"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="86400"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="604800"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="2.592e+06"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="7.776e+06"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="1.5552e+07"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="3.1104e+07"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="1.24416e+08"} 0 +rest_client_exec_plugin_certificate_rotation_age_bucket{le="+Inf"} 0 +rest_client_exec_plugin_certificate_rotation_age_sum 0 +rest_client_exec_plugin_certificate_rotation_age_count 0 +# HELP rest_client_exec_plugin_ttl_seconds [ALPHA] Gauge of the shortest TTL (time-to-live) of the client certificate(s) managed by the auth exec plugin. 
The value is in seconds until certificate expiry (negative if already expired). If auth exec plugins are unused or manage no TLS certificates, the value will be +INF. +# TYPE rest_client_exec_plugin_ttl_seconds gauge +rest_client_exec_plugin_ttl_seconds +Inf +# HELP rest_client_rate_limiter_duration_seconds [ALPHA] Client side rate limiter latency in seconds. Broken down by verb, and host. +# TYPE rest_client_rate_limiter_duration_seconds histogram +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.005"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.025"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.1"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.25"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.5"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="1"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="2"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="4"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="8"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="15"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="30"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="60"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1250 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="GET"} 0.0044709540000000035 +rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="GET"} 1250 
+rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.005"} 3 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.025"} 3 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.1"} 3 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.25"} 3 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.5"} 3 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="1"} 3 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="2"} 3 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="4"} 3 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="8"} 3 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="15"} 3 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="30"} 3 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="60"} 3 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 3 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="PATCH"} 2.04e-05 +rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="PATCH"} 3 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.005"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.025"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.1"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.25"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.5"} 19 
+rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="1"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="2"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="4"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="8"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="15"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="30"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="60"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 19 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="POST"} 7.005999999999999e-05 +rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="POST"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.005"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.025"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.1"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.25"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.5"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="1"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="2"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="4"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="8"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="15"} 1202 
+rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="30"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="60"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1202 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="PUT"} 0.0031652790000000004 +rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="PUT"} 1202 +# HELP rest_client_request_duration_seconds [ALPHA] Request latency in seconds. Broken down by verb, and host. +# TYPE rest_client_request_duration_seconds histogram +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.005"} 1036 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.025"} 1247 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.1"} 1248 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.25"} 1249 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.5"} 1249 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="1"} 1249 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="2"} 1249 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="4"} 1250 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="8"} 1250 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="15"} 1250 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="30"} 1250 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="60"} 1250 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1250 +rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="GET"} 8.867916397999984 
+rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="GET"} 1250 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.005"} 0 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.025"} 0 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.1"} 3 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.25"} 3 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.5"} 3 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="1"} 3 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="2"} 3 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="4"} 3 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="8"} 3 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="15"} 3 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="30"} 3 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="60"} 3 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 3 +rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="PATCH"} 0.087315227 +rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="PATCH"} 3 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.005"} 2 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.025"} 17 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.1"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.25"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.5"} 19 
+rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="1"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="2"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="4"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="8"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="15"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="30"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="60"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 19 +rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="POST"} 0.24962320100000002 +rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="POST"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.005"} 6 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.025"} 1201 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.1"} 1201 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.25"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.5"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="1"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="2"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="4"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="8"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="15"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="30"} 1202 
+rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="60"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1202 +rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="PUT"} 14.273085986000012 +rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="PUT"} 1202 +# HELP rest_client_request_size_bytes [ALPHA] Request size in bytes. Broken down by verb and host. +# TYPE rest_client_request_size_bytes histogram +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="64"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="256"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="512"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1024"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4096"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="16384"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="65536"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="262144"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.048576e+06"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4.194304e+06"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.6777216e+07"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1250 +rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="GET"} 0 +rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="GET"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="64"} 0 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="256"} 0 
+rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="512"} 3 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1024"} 3 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4096"} 3 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="16384"} 3 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="65536"} 3 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="262144"} 3 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.048576e+06"} 3 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4.194304e+06"} 3 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.6777216e+07"} 3 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 3 +rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="PATCH"} 1026 +rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="PATCH"} 3 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="64"} 0 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="256"} 7 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="512"} 17 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1024"} 18 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4096"} 19 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="16384"} 19 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="65536"} 19 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="262144"} 19 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.048576e+06"} 19 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4.194304e+06"} 19 
+rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.6777216e+07"} 19 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 19 +rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="POST"} 6726 +rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="POST"} 19 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="64"} 0 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="256"} 0 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="512"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1024"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4096"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="16384"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="65536"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="262144"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.048576e+06"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4.194304e+06"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.6777216e+07"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1202 +rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="PUT"} 512715 +rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="PUT"} 1202 +# HELP rest_client_requests_total [ALPHA] Number of HTTP requests, partitioned by status code, method, and host. 
+# TYPE rest_client_requests_total counter +rest_client_requests_total{code="200",host="172.18.0.2:6443",method="GET"} 1305 +rest_client_requests_total{code="200",host="172.18.0.2:6443",method="PATCH"} 3 +rest_client_requests_total{code="200",host="172.18.0.2:6443",method="PUT"} 1202 +rest_client_requests_total{code="201",host="172.18.0.2:6443",method="POST"} 19 +rest_client_requests_total{code="403",host="172.18.0.2:6443",method="GET"} 32 +rest_client_requests_total{code="404",host="172.18.0.2:6443",method="GET"} 1 +# HELP rest_client_response_size_bytes [ALPHA] Response size in bytes. Broken down by verb and host. +# TYPE rest_client_response_size_bytes histogram +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="64"} 9 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="256"} 35 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="512"} 1244 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1024"} 1246 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4096"} 1249 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="16384"} 1250 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="65536"} 1250 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="262144"} 1250 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.048576e+06"} 1250 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4.194304e+06"} 1250 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.6777216e+07"} 1250 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1250 +rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="GET"} 537154 +rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="GET"} 1250 
+rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="64"} 0 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="256"} 0 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="512"} 0 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1024"} 0 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4096"} 3 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="16384"} 3 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="65536"} 3 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="262144"} 3 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.048576e+06"} 3 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4.194304e+06"} 3 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.6777216e+07"} 3 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 3 +rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="PATCH"} 10807 +rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="PATCH"} 3 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="64"} 6 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="256"} 6 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="512"} 7 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1024"} 17 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4096"} 19 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="16384"} 19 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="65536"} 19 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="262144"} 19 
+rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.048576e+06"} 19 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4.194304e+06"} 19 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.6777216e+07"} 19 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 19 +rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="POST"} 10350 +rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="POST"} 19 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="64"} 0 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="256"} 0 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="512"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1024"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4096"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="16384"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="65536"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="262144"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.048576e+06"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4.194304e+06"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.6777216e+07"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1202 +rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="PUT"} 512716 +rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="PUT"} 1202 +# HELP rest_client_transport_cache_entries [ALPHA] Number of transport entries in the internal cache. 
+# TYPE rest_client_transport_cache_entries gauge +rest_client_transport_cache_entries 2 +# HELP rest_client_transport_create_calls_total [ALPHA] Number of calls to get a new transport, partitioned by the result of the operation hit: obtained from the cache, miss: created and added to the cache, uncacheable: created and not cached +# TYPE rest_client_transport_create_calls_total counter +rest_client_transport_create_calls_total{result="hit"} 4 +rest_client_transport_create_calls_total{result="miss"} 2 +# HELP scheduler_framework_extension_point_duration_seconds [STABLE] Latency for running all plugins of a specific extension point. +# TYPE scheduler_framework_extension_point_duration_seconds histogram +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0001"} 0 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0002"} 0 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0004"} 0 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0008"} 0 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0016"} 0 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0032"} 0 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0064"} 0 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0128"} 4 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0256"} 4 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0512"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.1024"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.2048"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="+Inf"} 6 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="Bind",profile="default-scheduler",status="Success"} 0.113648547 +scheduler_framework_extension_point_duration_seconds_count{extension_point="Bind",profile="default-scheduler",status="Success"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0001"} 5 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0002"} 8 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0004"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0008"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0016"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0032"} 9 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0064"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0128"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0256"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0512"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.1024"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.2048"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="+Inf"} 9 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="Filter",profile="default-scheduler",status="Success"} 0.00099381 +scheduler_framework_extension_point_duration_seconds_count{extension_point="Filter",profile="default-scheduler",status="Success"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0001"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0002"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0004"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0008"} 6 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0016"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0032"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0064"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0128"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0256"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0512"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.1024"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.2048"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="+Inf"} 6 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="Permit",profile="default-scheduler",status="Success"} 1.4594999999999998e-05 +scheduler_framework_extension_point_duration_seconds_count{extension_point="Permit",profile="default-scheduler",status="Success"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0001"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0002"} 6 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0004"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0008"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0016"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0032"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0064"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0128"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0256"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0512"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.1024"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.2048"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="+Inf"} 6 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="PostBind",profile="default-scheduler",status="Success"} 2.1460999999999997e-05 +scheduler_framework_extension_point_duration_seconds_count{extension_point="PostBind",profile="default-scheduler",status="Success"} 6 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.0001"} 3 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.0002"} 3 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.0004"} 3 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.0008"} 3 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.0016"} 3 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.0032"} 3 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.0064"} 3 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.0128"} 3 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.0256"} 3 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.0512"} 3 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.1024"} 3 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.2048"} 3 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="+Inf"} 3 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable"} 8.5664e-05 +scheduler_framework_extension_point_duration_seconds_count{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable"} 3 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0001"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0002"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0004"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0008"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0016"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0032"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0064"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0128"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0256"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0512"} 6 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.1024"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.2048"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="+Inf"} 6 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="PreBind",profile="default-scheduler",status="Success"} 6.4472e-05 +scheduler_framework_extension_point_duration_seconds_count{extension_point="PreBind",profile="default-scheduler",status="Success"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0001"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0002"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0004"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0008"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0016"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0032"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0064"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0128"} 9 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0256"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0512"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.1024"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.2048"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="+Inf"} 9 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="PreEnqueue",profile="default-scheduler",status="Success"} 0.00014482 +scheduler_framework_extension_point_duration_seconds_count{extension_point="PreEnqueue",profile="default-scheduler",status="Success"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0001"} 8 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0002"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0004"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0008"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0016"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0032"} 9 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0064"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0128"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0256"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0512"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.1024"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.2048"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="+Inf"} 9 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="PreFilter",profile="default-scheduler",status="Success"} 0.0006032540000000001 +scheduler_framework_extension_point_duration_seconds_count{extension_point="PreFilter",profile="default-scheduler",status="Success"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0001"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0002"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0004"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0008"} 6 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0016"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0032"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0064"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0128"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0256"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0512"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.1024"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.2048"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="+Inf"} 6 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="Reserve",profile="default-scheduler",status="Success"} 2.3699e-05 +scheduler_framework_extension_point_duration_seconds_count{extension_point="Reserve",profile="default-scheduler",status="Success"} 6 +# HELP scheduler_goroutines [ALPHA] Number of running goroutines split by the work they do such as binding. +# TYPE scheduler_goroutines gauge +scheduler_goroutines{operation="Filter"} 0 +scheduler_goroutines{operation="InterPodAffinity"} 0 +scheduler_goroutines{operation="binding"} 0 +# HELP scheduler_pending_pods [STABLE] Number of pending pods, by the queue type. 
'active' means number of pods in activeQ; 'backoff' means number of pods in backoffQ; 'unschedulable' means number of pods in unschedulablePods that the scheduler attempted to schedule and failed; 'gated' is the number of unschedulable pods that the scheduler never attempted to schedule because they are gated. +# TYPE scheduler_pending_pods gauge +scheduler_pending_pods{queue="active"} 0 +scheduler_pending_pods{queue="backoff"} 0 +scheduler_pending_pods{queue="gated"} 0 +scheduler_pending_pods{queue="unschedulable"} 0 +# HELP scheduler_plugin_evaluation_total [ALPHA] Number of attempts to schedule pods by each plugin and the extension point (available only in PreFilter and Filter.). +# TYPE scheduler_plugin_evaluation_total counter +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="AzureDiskLimits",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="EBSLimits",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="GCEPDLimits",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="InterPodAffinity",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodeAffinity",profile="default-scheduler"} 6 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodeName",profile="default-scheduler"} 9 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodePorts",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodeResourcesFit",profile="default-scheduler"} 6 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodeUnschedulable",profile="default-scheduler"} 9 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodeVolumeLimits",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="PodTopologySpread",profile="default-scheduler"} 0 
+scheduler_plugin_evaluation_total{extension_point="Filter",plugin="TaintToleration",profile="default-scheduler"} 9 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="VolumeBinding",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="VolumeRestrictions",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="VolumeZone",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="AzureDiskLimits",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="EBSLimits",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="GCEPDLimits",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="InterPodAffinity",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="NodeAffinity",profile="default-scheduler"} 9 +scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="NodePorts",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="NodeResourcesFit",profile="default-scheduler"} 9 +scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="NodeVolumeLimits",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="PodTopologySpread",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="VolumeBinding",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="VolumeRestrictions",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="VolumeZone",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="PreScore",plugin="InterPodAffinity",profile="default-scheduler"} 0 
+scheduler_plugin_evaluation_total{extension_point="PreScore",plugin="NodeAffinity",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="PreScore",plugin="NodeResourcesBalancedAllocation",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="PreScore",plugin="NodeResourcesFit",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="PreScore",plugin="PodTopologySpread",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="PreScore",plugin="TaintToleration",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Score",plugin="ImageLocality",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Score",plugin="InterPodAffinity",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Score",plugin="NodeAffinity",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Score",plugin="NodeResourcesBalancedAllocation",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Score",plugin="NodeResourcesFit",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Score",plugin="PodTopologySpread",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Score",plugin="TaintToleration",profile="default-scheduler"} 0 +scheduler_plugin_evaluation_total{extension_point="Score",plugin="VolumeBinding",profile="default-scheduler"} 0 +# HELP scheduler_plugin_execution_duration_seconds [ALPHA] Duration for running a plugin at a specific extension point. 
+# TYPE scheduler_plugin_execution_duration_seconds histogram +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.0012974633789062506"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="Filter",plugin="NodeName",status="Success"} 2.56e-07 +scheduler_plugin_execution_duration_seconds_count{extension_point="Filter",plugin="NodeName",status="Success"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="3.375000000000001e-05"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.006568408355712893"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="Filter",plugin="NodeUnschedulable",status="Success"} 6.49e-07 +scheduler_plugin_execution_duration_seconds_count{extension_point="Filter",plugin="NodeUnschedulable",status="Success"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="7.593750000000002e-05"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.006568408355712893"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable"} 7.383e-06 +scheduler_plugin_execution_duration_seconds_count{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="1e-05"} 0 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="1.5000000000000002e-05"} 0 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="7.593750000000002e-05"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.009852612533569338"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable"} 2.1459e-05 +scheduler_plugin_execution_duration_seconds_count{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.00017085937500000006"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="+Inf"} 1 
+scheduler_plugin_execution_duration_seconds_sum{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success"} 1.268e-06 +scheduler_plugin_execution_duration_seconds_count{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.0005766503906250003"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip"} 3.98e-07 +scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="1.5000000000000002e-05"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.0029192926025390638"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="EBSLimits",status="Skip"} 1.971e-06 +scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="EBSLimits",status="Skip"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="7.593750000000002e-05"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.014778918800354007"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip"} 1.265e-06 +scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="1e-05"} 0 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="1.5000000000000002e-05"} 0 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="2.2500000000000005e-05"} 0 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.00038443359375000017"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip"} 2.4171e-05 +scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.0012974633789062506"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="NodeAffinity",status="Success"} 7.303e-06 +scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="NodeAffinity",status="Success"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="3.375000000000001e-05"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.009852612533569338"} 
1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="NodePorts",status="Skip"} 1.227e-06 +scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="NodePorts",status="Skip"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.00017085937500000006"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="+Inf"} 1 
+scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success"} 8.455e-06 +scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.0005766503906250003"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip"} 5.827e-06 +scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="1e-05"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.0019461950683593758"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip"} 2.198e-06 +scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="3.375000000000001e-05"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.006568408355712893"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="VolumeBinding",status="Skip"} 3.028e-06 +scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="VolumeBinding",status="Skip"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.00011390625000000003"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.02216837820053101"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip"} 3.122e-06 +scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.0005766503906250003"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="VolumeZone",status="Skip"} 1.37e-06 +scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="VolumeZone",status="Skip"} 1 +# HELP scheduler_pod_scheduling_attempts [STABLE] Number of attempts to successfully schedule a pod. 
+# TYPE scheduler_pod_scheduling_attempts histogram +scheduler_pod_scheduling_attempts_bucket{le="1"} 3 +scheduler_pod_scheduling_attempts_bucket{le="2"} 6 +scheduler_pod_scheduling_attempts_bucket{le="4"} 6 +scheduler_pod_scheduling_attempts_bucket{le="8"} 6 +scheduler_pod_scheduling_attempts_bucket{le="16"} 6 +scheduler_pod_scheduling_attempts_bucket{le="+Inf"} 6 +scheduler_pod_scheduling_attempts_sum 9 +scheduler_pod_scheduling_attempts_count 6 +# HELP scheduler_pod_scheduling_sli_duration_seconds [BETA] E2e latency for a pod being scheduled, from the time the pod enters the scheduling queue and might involve multiple scheduling attempts. +# TYPE scheduler_pod_scheduling_sli_duration_seconds histogram +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="0.01"} 1 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="0.02"} 1 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="0.04"} 2 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="0.08"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="0.16"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="0.32"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="0.64"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="1.28"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="2.56"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="5.12"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="10.24"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="20.48"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="40.96"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="81.92"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="163.84"} 3 
+scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="327.68"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="655.36"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="1310.72"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="2621.44"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="5242.88"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="+Inf"} 3 +scheduler_pod_scheduling_sli_duration_seconds_sum{attempts="1"} 0.080696909 +scheduler_pod_scheduling_sli_duration_seconds_count{attempts="1"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="0.01"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="0.02"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="0.04"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="0.08"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="0.16"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="0.32"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="0.64"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="1.28"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="2.56"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="5.12"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="10.24"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="20.48"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="40.96"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="81.92"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="163.84"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="327.68"} 3 
+scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="655.36"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="1310.72"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="2621.44"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="5242.88"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="+Inf"} 3 +scheduler_pod_scheduling_sli_duration_seconds_sum{attempts="2"} 14.839669345 +scheduler_pod_scheduling_sli_duration_seconds_count{attempts="2"} 3 +# HELP scheduler_preemption_attempts_total [STABLE] Total preemption attempts in the cluster till now +# TYPE scheduler_preemption_attempts_total counter +scheduler_preemption_attempts_total 3 +# HELP scheduler_preemption_victims [STABLE] Number of selected preemption victims +# TYPE scheduler_preemption_victims histogram +scheduler_preemption_victims_bucket{le="1"} 0 +scheduler_preemption_victims_bucket{le="2"} 0 +scheduler_preemption_victims_bucket{le="4"} 0 +scheduler_preemption_victims_bucket{le="8"} 0 +scheduler_preemption_victims_bucket{le="16"} 0 +scheduler_preemption_victims_bucket{le="32"} 0 +scheduler_preemption_victims_bucket{le="64"} 0 +scheduler_preemption_victims_bucket{le="+Inf"} 0 +scheduler_preemption_victims_sum 0 +scheduler_preemption_victims_count 0 +# HELP scheduler_queue_incoming_pods_total [STABLE] Number of pods added to scheduling queues by event and queue type. +# TYPE scheduler_queue_incoming_pods_total counter +scheduler_queue_incoming_pods_total{event="NodeTaintChange",queue="active"} 3 +scheduler_queue_incoming_pods_total{event="PodAdd",queue="active"} 6 +scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="unschedulable"} 3 +# HELP scheduler_schedule_attempts_total [STABLE] Number of attempts to schedule pods, by the result. 'unschedulable' means a pod could not be scheduled, while 'error' means an internal scheduler problem. 
+# TYPE scheduler_schedule_attempts_total counter +scheduler_schedule_attempts_total{profile="default-scheduler",result="scheduled"} 6 +scheduler_schedule_attempts_total{profile="default-scheduler",result="unschedulable"} 3 +# HELP scheduler_scheduler_cache_size [ALPHA] Number of nodes, pods, and assumed (bound) pods in the scheduler cache. +# TYPE scheduler_scheduler_cache_size gauge +scheduler_scheduler_cache_size{type="assumed_pods"} 0 +scheduler_scheduler_cache_size{type="nodes"} 1 +scheduler_scheduler_cache_size{type="pods"} 10 +# HELP scheduler_scheduling_algorithm_duration_seconds [ALPHA] Scheduling algorithm latency in seconds +# TYPE scheduler_scheduling_algorithm_duration_seconds histogram +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.001"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.002"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.004"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.008"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.016"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.032"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.064"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.128"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.256"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.512"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="1.024"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="2.048"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="4.096"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="8.192"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="16.384"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="+Inf"} 6 +scheduler_scheduling_algorithm_duration_seconds_sum 0.00114044 +scheduler_scheduling_algorithm_duration_seconds_count 6 +# HELP 
scheduler_scheduling_attempt_duration_seconds [STABLE] Scheduling attempt latency in seconds (scheduling algorithm + binding) +# TYPE scheduler_scheduling_attempt_duration_seconds histogram +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.001"} 0 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.002"} 0 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.004"} 0 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.008"} 0 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.016"} 4 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.032"} 5 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.064"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.128"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.256"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.512"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="1.024"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="2.048"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="4.096"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="8.192"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="16.384"} 6 
+scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="+Inf"} 6 +scheduler_scheduling_attempt_duration_seconds_sum{profile="default-scheduler",result="scheduled"} 0.116016227 +scheduler_scheduling_attempt_duration_seconds_count{profile="default-scheduler",result="scheduled"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="0.001"} 3 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="0.002"} 3 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="0.004"} 3 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="0.008"} 3 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="0.016"} 3 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="0.032"} 3 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="0.064"} 3 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="0.128"} 3 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="0.256"} 3 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="0.512"} 3 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="1.024"} 3 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="2.048"} 3 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="4.096"} 3 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="8.192"} 3 
+scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="16.384"} 3 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="+Inf"} 3 +scheduler_scheduling_attempt_duration_seconds_sum{profile="default-scheduler",result="unschedulable"} 0.0009928839999999999 +scheduler_scheduling_attempt_duration_seconds_count{profile="default-scheduler",result="unschedulable"} 3 +# HELP scheduler_unschedulable_pods [ALPHA] The number of unschedulable pods broken down by plugin name. A pod will increment the gauge for all plugins that caused it to not schedule and so this metric have meaning only when broken down by plugin. +# TYPE scheduler_unschedulable_pods gauge +scheduler_unschedulable_pods{plugin="TaintToleration",profile="default-scheduler"} 0 +# HELP workqueue_adds_total [ALPHA] Total number of adds handled by workqueue +# TYPE workqueue_adds_total counter +workqueue_adds_total{name="DynamicConfigMapCABundle-client-ca"} 42 +workqueue_adds_total{name="DynamicServingCertificateController"} 42 +workqueue_adds_total{name="RequestHeaderAuthRequestController"} 0 +# HELP workqueue_depth [ALPHA] Current depth of workqueue +# TYPE workqueue_depth gauge +workqueue_depth{name="DynamicConfigMapCABundle-client-ca"} 0 +workqueue_depth{name="DynamicServingCertificateController"} 0 +workqueue_depth{name="RequestHeaderAuthRequestController"} 0 +# HELP workqueue_longest_running_processor_seconds [ALPHA] How many seconds has the longest running processor for workqueue been running. 
+# TYPE workqueue_longest_running_processor_seconds gauge +workqueue_longest_running_processor_seconds{name="DynamicConfigMapCABundle-client-ca"} 0 +workqueue_longest_running_processor_seconds{name="DynamicServingCertificateController"} 0 +workqueue_longest_running_processor_seconds{name="RequestHeaderAuthRequestController"} 0 +# HELP workqueue_queue_duration_seconds [ALPHA] How long in seconds an item stays in workqueue before being requested. +# TYPE workqueue_queue_duration_seconds histogram +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="9.999999999999999e-05"} 39 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.001"} 41 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.01"} 41 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.1"} 41 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1"} 42 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="10"} 42 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="+Inf"} 42 +workqueue_queue_duration_seconds_sum{name="DynamicConfigMapCABundle-client-ca"} 0.101941402 +workqueue_queue_duration_seconds_count{name="DynamicConfigMapCABundle-client-ca"} 42 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-07"} 0 
+workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-05"} 41 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.001"} 42 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.01"} 42 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.1"} 42 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1"} 42 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="10"} 42 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="+Inf"} 42 +workqueue_queue_duration_seconds_sum{name="DynamicServingCertificateController"} 0.001841972 +workqueue_queue_duration_seconds_count{name="DynamicServingCertificateController"} 42 +workqueue_queue_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="1e-08"} 0 +workqueue_queue_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="1e-07"} 0 +workqueue_queue_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="1e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="9.999999999999999e-06"} 0 +workqueue_queue_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="9.999999999999999e-05"} 0 +workqueue_queue_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="0.001"} 0 +workqueue_queue_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="0.01"} 0 +workqueue_queue_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="0.1"} 0 +workqueue_queue_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="1"} 0 
+workqueue_queue_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="10"} 0 +workqueue_queue_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="+Inf"} 0 +workqueue_queue_duration_seconds_sum{name="RequestHeaderAuthRequestController"} 0 +workqueue_queue_duration_seconds_count{name="RequestHeaderAuthRequestController"} 0 +# HELP workqueue_retries_total [ALPHA] Total number of retries handled by workqueue +# TYPE workqueue_retries_total counter +workqueue_retries_total{name="DynamicConfigMapCABundle-client-ca"} 0 +workqueue_retries_total{name="DynamicServingCertificateController"} 0 +workqueue_retries_total{name="RequestHeaderAuthRequestController"} 0 +# HELP workqueue_unfinished_work_seconds [ALPHA] How many seconds of work has done that is in progress and hasn't been observed by work_duration. Large values indicate stuck threads. One can deduce the number of stuck threads by observing the rate at which this increases. +# TYPE workqueue_unfinished_work_seconds gauge +workqueue_unfinished_work_seconds{name="DynamicConfigMapCABundle-client-ca"} 0 +workqueue_unfinished_work_seconds{name="DynamicServingCertificateController"} 0 +workqueue_unfinished_work_seconds{name="RequestHeaderAuthRequestController"} 0 +# HELP workqueue_work_duration_seconds [ALPHA] How long in seconds processing an item from workqueue takes. 
+# TYPE workqueue_work_duration_seconds histogram +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="9.999999999999999e-06"} 1 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="9.999999999999999e-05"} 41 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.001"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.01"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.1"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="10"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="+Inf"} 42 +workqueue_work_duration_seconds_sum{name="DynamicConfigMapCABundle-client-ca"} 0.001374066 +workqueue_work_duration_seconds_count{name="DynamicConfigMapCABundle-client-ca"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-05"} 39 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.001"} 41 
+workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.01"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.1"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="10"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="+Inf"} 42 +workqueue_work_duration_seconds_sum{name="DynamicServingCertificateController"} 0.0027605059999999994 +workqueue_work_duration_seconds_count{name="DynamicServingCertificateController"} 42 +workqueue_work_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="1e-08"} 0 +workqueue_work_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="1e-07"} 0 +workqueue_work_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="1e-06"} 0 +workqueue_work_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="9.999999999999999e-06"} 0 +workqueue_work_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="9.999999999999999e-05"} 0 +workqueue_work_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="0.001"} 0 +workqueue_work_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="0.01"} 0 +workqueue_work_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="0.1"} 0 +workqueue_work_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="1"} 0 +workqueue_work_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="10"} 0 +workqueue_work_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="+Inf"} 0 +workqueue_work_duration_seconds_sum{name="RequestHeaderAuthRequestController"} 0 +workqueue_work_duration_seconds_count{name="RequestHeaderAuthRequestController"} 0 diff --git a/metricbeat/module/kubernetes/scheduler/_meta/test/metrics.1.29.expected 
b/metricbeat/module/kubernetes/scheduler/_meta/test/metrics.1.29.expected new file mode 100644 index 000000000000..595263d46ce8 --- /dev/null +++ b/metricbeat/module/kubernetes/scheduler/_meta/test/metrics.1.29.expected @@ -0,0 +1,857 @@ +[ + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "count": 19 + } + }, + "code": "201", + "host": "172.18.0.2:6443", + "method": "POST" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "profile": "default-scheduler", + "result": "unschedulable", + "scheduling": { + "attempts": { + "duration": { + "us": { + "bucket": { + "+Inf": 3, + "1000": 3, + "1024000": 3, + "128000": 3, + "16000": 3, + "16384000": 3, + "2000": 3, + "2048000": 3, + "256000": 3, + "32000": 3, + "4000": 3, + "4096000": 3, + "512000": 3, + "64000": 3, + "8000": 3, + "8192000": 3 + }, + "count": 3, + "sum": 992.8839999999999 + } + } + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "count": 1202 + } + }, + "code": "200", + "host": "172.18.0.2:6443", + "method": "PUT" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "count": 3 + } + }, + "code": "200", + "host": "172.18.0.2:6443", + "method": "PATCH" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + 
"Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "count": 1 + } + }, + "code": "404", + "host": "172.18.0.2:6443", + "method": "GET" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "duration": { + "us": { + "bucket": { + "+Inf": 1250, + "100000": 1248, + "1000000": 1249, + "15000000": 1250, + "2000000": 1249, + "25000": 1247, + "250000": 1249, + "30000000": 1250, + "4000000": 1250, + "5000": 1036, + "500000": 1249, + "60000000": 1250, + "8000000": 1250 + }, + "count": 1250, + "sum": 8867916.397999985 + } + }, + "size": { + "bytes": { + "bucket": { + "+Inf": 1250, + "1024": 1250, + "1048576": 1250, + "16384": 1250, + "16777216": 1250, + "256": 1250, + "262144": 1250, + "4096": 1250, + "4194304": 1250, + "512": 1250, + "64": 1250, + "65536": 1250 + }, + "count": 1250, + "sum": 0 + } + } + }, + "response": { + "size": { + "bytes": { + "bucket": { + "+Inf": 1250, + "1024": 1246, + "1048576": 1250, + "16384": 1250, + "16777216": 1250, + "256": 35, + "262144": 1250, + "4096": 1249, + "4194304": 1250, + "512": 1244, + "64": 9, + "65536": 1250 + }, + "count": 1250, + "sum": 537154 + } + } + } + }, + "host": "172.18.0.2:6443", + "verb": "GET" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "count": 1305 + } + }, + "code": "200", + "host": "172.18.0.2:6443", + "method": "GET" + }, + "Index": "", + "ID": "", + 
"Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "queue": "gated", + "scheduling": { + "pending": { + "pods": { + "count": 0 + } + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "duration": { + "us": { + "bucket": { + "+Inf": 3, + "100000": 3, + "1000000": 3, + "15000000": 3, + "2000000": 3, + "25000": 0, + "250000": 3, + "30000000": 3, + "4000000": 3, + "5000": 0, + "500000": 3, + "60000000": 3, + "8000000": 3 + }, + "count": 3, + "sum": 87315.227 + } + }, + "size": { + "bytes": { + "bucket": { + "+Inf": 3, + "1024": 3, + "1048576": 3, + "16384": 3, + "16777216": 3, + "256": 0, + "262144": 3, + "4096": 3, + "4194304": 3, + "512": 3, + "64": 0, + "65536": 3 + }, + "count": 3, + "sum": 1026 + } + } + }, + "response": { + "size": { + "bytes": { + "bucket": { + "+Inf": 3, + "1024": 0, + "1048576": 3, + "16384": 3, + "16777216": 3, + "256": 0, + "262144": 3, + "4096": 3, + "4194304": 3, + "512": 0, + "64": 0, + "65536": 3 + }, + "count": 3, + "sum": 10807 + } + } + } + }, + "host": "172.18.0.2:6443", + "verb": "PATCH" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "leader": { + "is_master": true + }, + "name": "kube-scheduler" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + 
"DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "queue": "unschedulable", + "scheduling": { + "pending": { + "pods": { + "count": 0 + } + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "RequestHeaderAuthRequestController", + "workqueue": { + "adds": { + "count": 0 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "duration": { + "us": { + "bucket": { + "+Inf": 19, + "100000": 19, + "1000000": 19, + "15000000": 19, + "2000000": 19, + "25000": 17, + "250000": 19, + "30000000": 19, + "4000000": 19, + "5000": 2, + "500000": 19, + "60000000": 19, + "8000000": 19 + }, + "count": 19, + "sum": 249623.20100000003 + } + }, + "size": { + "bytes": { + "bucket": { + "+Inf": 19, + "1024": 18, + "1048576": 19, + "16384": 19, + "16777216": 19, + "256": 7, + "262144": 19, + "4096": 19, + "4194304": 19, + "512": 17, + "64": 0, + "65536": 19 + }, + "count": 19, + "sum": 6726 + } + } + }, + "response": { + "size": { + "bytes": { + "bucket": { + "+Inf": 19, + "1024": 17, + "1048576": 19, + "16384": 19, + "16777216": 19, + "256": 6, + "262144": 19, + "4096": 19, + "4194304": 19, + "512": 7, + "64": 6, + "65536": 19 + }, + "count": 19, + "sum": 10350 + } + } + } + }, + "host": "172.18.0.2:6443", + "verb": "POST" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + 
"Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "duration": { + "us": { + "bucket": { + "+Inf": 1202, + "100000": 1201, + "1000000": 1202, + "15000000": 1202, + "2000000": 1202, + "25000": 1201, + "250000": 1202, + "30000000": 1202, + "4000000": 1202, + "5000": 6, + "500000": 1202, + "60000000": 1202, + "8000000": 1202 + }, + "count": 1202, + "sum": 14273085.986000013 + } + }, + "size": { + "bytes": { + "bucket": { + "+Inf": 1202, + "1024": 1202, + "1048576": 1202, + "16384": 1202, + "16777216": 1202, + "256": 0, + "262144": 1202, + "4096": 1202, + "4194304": 1202, + "512": 1202, + "64": 0, + "65536": 1202 + }, + "count": 1202, + "sum": 512715 + } + } + }, + "response": { + "size": { + "bytes": { + "bucket": { + "+Inf": 1202, + "1024": 1202, + "1048576": 1202, + "16384": 1202, + "16777216": 1202, + "256": 0, + "262144": 1202, + "4096": 1202, + "4194304": 1202, + "512": 1202, + "64": 0, + "65536": 1202 + }, + "count": 1202, + "sum": 512716 + } + } + } + }, + "host": "172.18.0.2:6443", + "verb": "PUT" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "queue": "backoff", + "scheduling": { + "pending": { + "pods": { + "count": 0 + } + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "client": { + "request": { + "count": 32 + } + }, + "code": "403", + "host": "172.18.0.2:6443", + "method": "GET" + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": 
"0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "DynamicConfigMapCABundle-client-ca", + "workqueue": { + "adds": { + "count": 42 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "process": { + "cpu": { + "sec": 14 + }, + "fds": { + "max": { + "count": 1048576 + }, + "open": { + "count": 10 + } + }, + "memory": { + "resident": { + "bytes": 63909888 + }, + "virtual": { + "bytes": 1316384768 + } + }, + "started": { + "sec": 1704894767.07 + } + }, + "scheduling": { + "preemption": { + "attempts": { + "count": 3 + }, + "victims": { + "bucket": { + "+Inf": 0, + "1": 0, + "16": 0, + "2": 0, + "32": 0, + "4": 0, + "64": 0, + "8": 0 + }, + "count": 0, + "sum": 0 + } + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "queue": "active", + "scheduling": { + "pending": { + "pods": { + "count": 0 + } + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "name": "DynamicServingCertificateController", + "workqueue": { + "adds": { + "count": 42 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + 
"sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": null, + "MetricSetFields": { + "profile": "default-scheduler", + "result": "scheduled", + "scheduling": { + "attempts": { + "duration": { + "us": { + "bucket": { + "+Inf": 6, + "1000": 0, + "1024000": 6, + "128000": 6, + "16000": 4, + "16384000": 6, + "2000": 0, + "2048000": 6, + "256000": 6, + "32000": 5, + "4000": 0, + "4096000": 6, + "512000": 6, + "64000": 6, + "8000": 0, + "8192000": 6 + }, + "count": 6, + "sum": 116016.227 + } + } + } + } + }, + "Index": "", + "ID": "", + "Namespace": "", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + } +] \ No newline at end of file diff --git a/metricbeat/module/kubernetes/scheduler/_meta/testdata/docs.plain b/metricbeat/module/kubernetes/scheduler/_meta/testdata/docs.plain index ccf5ea4c1604..dd5700fee34a 100644 --- a/metricbeat/module/kubernetes/scheduler/_meta/testdata/docs.plain +++ b/metricbeat/module/kubernetes/scheduler/_meta/testdata/docs.plain @@ -37,7 +37,7 @@ apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="3"} 1 apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="5"} 1 apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="10"} 1 apiserver_delegated_authn_request_duration_seconds_bucket{code="201",le="+Inf"} 1 -apiserver_delegated_authn_request_duration_seconds_sum{code="201"} 0.009021638 +apiserver_delegated_authn_request_duration_seconds_sum{code="201"} 0.004166145 apiserver_delegated_authn_request_duration_seconds_count{code="201"} 1 # HELP apiserver_delegated_authn_request_total [ALPHA] Number of HTTP requests partitioned by 
status code. # TYPE apiserver_delegated_authn_request_total counter @@ -53,7 +53,7 @@ apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="3"} 1 apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="5"} 1 apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="10"} 1 apiserver_delegated_authz_request_duration_seconds_bucket{code="201",le="+Inf"} 1 -apiserver_delegated_authz_request_duration_seconds_sum{code="201"} 0.003091905 +apiserver_delegated_authz_request_duration_seconds_sum{code="201"} 0.003826062 apiserver_delegated_authz_request_duration_seconds_count{code="201"} 1 # HELP apiserver_delegated_authz_request_total [ALPHA] Number of HTTP requests partitioned by status code. # TYPE apiserver_delegated_authz_request_total counter @@ -94,30 +94,30 @@ apiserver_webhooks_x509_insecure_sha1_total 0 apiserver_webhooks_x509_missing_san_total 0 # HELP authenticated_user_requests [ALPHA] Counter of authenticated requests broken out by username. # TYPE authenticated_user_requests counter -authenticated_user_requests{username="other"} 38 +authenticated_user_requests{username="other"} 243 # HELP authentication_attempts [ALPHA] Counter of authenticated attempts. # TYPE authentication_attempts counter -authentication_attempts{result="success"} 38 +authentication_attempts{result="success"} 243 # HELP authentication_duration_seconds [ALPHA] Authentication duration in seconds broken out by result. 
# TYPE authentication_duration_seconds histogram -authentication_duration_seconds_bucket{result="success",le="0.001"} 38 -authentication_duration_seconds_bucket{result="success",le="0.002"} 38 -authentication_duration_seconds_bucket{result="success",le="0.004"} 38 -authentication_duration_seconds_bucket{result="success",le="0.008"} 38 -authentication_duration_seconds_bucket{result="success",le="0.016"} 38 -authentication_duration_seconds_bucket{result="success",le="0.032"} 38 -authentication_duration_seconds_bucket{result="success",le="0.064"} 38 -authentication_duration_seconds_bucket{result="success",le="0.128"} 38 -authentication_duration_seconds_bucket{result="success",le="0.256"} 38 -authentication_duration_seconds_bucket{result="success",le="0.512"} 38 -authentication_duration_seconds_bucket{result="success",le="1.024"} 38 -authentication_duration_seconds_bucket{result="success",le="2.048"} 38 -authentication_duration_seconds_bucket{result="success",le="4.096"} 38 -authentication_duration_seconds_bucket{result="success",le="8.192"} 38 -authentication_duration_seconds_bucket{result="success",le="16.384"} 38 -authentication_duration_seconds_bucket{result="success",le="+Inf"} 38 -authentication_duration_seconds_sum{result="success"} 0.0012335150000000003 -authentication_duration_seconds_count{result="success"} 38 +authentication_duration_seconds_bucket{result="success",le="0.001"} 243 +authentication_duration_seconds_bucket{result="success",le="0.002"} 243 +authentication_duration_seconds_bucket{result="success",le="0.004"} 243 +authentication_duration_seconds_bucket{result="success",le="0.008"} 243 +authentication_duration_seconds_bucket{result="success",le="0.016"} 243 +authentication_duration_seconds_bucket{result="success",le="0.032"} 243 +authentication_duration_seconds_bucket{result="success",le="0.064"} 243 +authentication_duration_seconds_bucket{result="success",le="0.128"} 243 +authentication_duration_seconds_bucket{result="success",le="0.256"} 243 
+authentication_duration_seconds_bucket{result="success",le="0.512"} 243 +authentication_duration_seconds_bucket{result="success",le="1.024"} 243 +authentication_duration_seconds_bucket{result="success",le="2.048"} 243 +authentication_duration_seconds_bucket{result="success",le="4.096"} 243 +authentication_duration_seconds_bucket{result="success",le="8.192"} 243 +authentication_duration_seconds_bucket{result="success",le="16.384"} 243 +authentication_duration_seconds_bucket{result="success",le="+Inf"} 243 +authentication_duration_seconds_sum{result="success"} 0.008371243 +authentication_duration_seconds_count{result="success"} 243 # HELP authentication_token_cache_active_fetch_count [ALPHA] # TYPE authentication_token_cache_active_fetch_count gauge authentication_token_cache_active_fetch_count{status="blocked"} 0 @@ -127,8 +127,8 @@ authentication_token_cache_active_fetch_count{status="in_flight"} 0 authentication_token_cache_fetch_total{status="ok"} 1 # HELP authentication_token_cache_request_duration_seconds [ALPHA] # TYPE authentication_token_cache_request_duration_seconds histogram -authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.005"} 0 -authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.01"} 0 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.005"} 1 +authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.01"} 1 authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.025"} 1 authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.05"} 1 authentication_token_cache_request_duration_seconds_bucket{status="miss",le="0.1"} 1 @@ -139,34 +139,37 @@ authentication_token_cache_request_duration_seconds_bucket{status="miss",le="2.5 authentication_token_cache_request_duration_seconds_bucket{status="miss",le="5"} 1 authentication_token_cache_request_duration_seconds_bucket{status="miss",le="10"} 1 
authentication_token_cache_request_duration_seconds_bucket{status="miss",le="+Inf"} 1 -authentication_token_cache_request_duration_seconds_sum{status="miss"} 0.013 +authentication_token_cache_request_duration_seconds_sum{status="miss"} 0.004 authentication_token_cache_request_duration_seconds_count{status="miss"} 1 # HELP authentication_token_cache_request_total [ALPHA] # TYPE authentication_token_cache_request_total counter authentication_token_cache_request_total{status="miss"} 1 # HELP authorization_attempts_total [ALPHA] Counter of authorization attempts broken down by result. It can be either 'allowed', 'denied', 'no-opinion' or 'error'. # TYPE authorization_attempts_total counter -authorization_attempts_total{result="allowed"} 38 +authorization_attempts_total{result="allowed"} 243 # HELP authorization_duration_seconds [ALPHA] Authorization duration in seconds broken out by result. # TYPE authorization_duration_seconds histogram -authorization_duration_seconds_bucket{result="allowed",le="0.001"} 38 -authorization_duration_seconds_bucket{result="allowed",le="0.002"} 38 -authorization_duration_seconds_bucket{result="allowed",le="0.004"} 38 -authorization_duration_seconds_bucket{result="allowed",le="0.008"} 38 -authorization_duration_seconds_bucket{result="allowed",le="0.016"} 38 -authorization_duration_seconds_bucket{result="allowed",le="0.032"} 38 -authorization_duration_seconds_bucket{result="allowed",le="0.064"} 38 -authorization_duration_seconds_bucket{result="allowed",le="0.128"} 38 -authorization_duration_seconds_bucket{result="allowed",le="0.256"} 38 -authorization_duration_seconds_bucket{result="allowed",le="0.512"} 38 -authorization_duration_seconds_bucket{result="allowed",le="1.024"} 38 -authorization_duration_seconds_bucket{result="allowed",le="2.048"} 38 -authorization_duration_seconds_bucket{result="allowed",le="4.096"} 38 -authorization_duration_seconds_bucket{result="allowed",le="8.192"} 38 
-authorization_duration_seconds_bucket{result="allowed",le="16.384"} 38 -authorization_duration_seconds_bucket{result="allowed",le="+Inf"} 38 -authorization_duration_seconds_sum{result="allowed"} 0.000301018 -authorization_duration_seconds_count{result="allowed"} 38 +authorization_duration_seconds_bucket{result="allowed",le="0.001"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.002"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.004"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.008"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.016"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.032"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.064"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.128"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.256"} 243 +authorization_duration_seconds_bucket{result="allowed",le="0.512"} 243 +authorization_duration_seconds_bucket{result="allowed",le="1.024"} 243 +authorization_duration_seconds_bucket{result="allowed",le="2.048"} 243 +authorization_duration_seconds_bucket{result="allowed",le="4.096"} 243 +authorization_duration_seconds_bucket{result="allowed",le="8.192"} 243 +authorization_duration_seconds_bucket{result="allowed",le="16.384"} 243 +authorization_duration_seconds_bucket{result="allowed",le="+Inf"} 243 +authorization_duration_seconds_sum{result="allowed"} 0.0018631259999999992 +authorization_duration_seconds_count{result="allowed"} 243 +# HELP cardinality_enforcement_unexpected_categorizations_total [ALPHA] The count of unexpected categorizations during cardinality enforcement. +# TYPE cardinality_enforcement_unexpected_categorizations_total counter +cardinality_enforcement_unexpected_categorizations_total 0 # HELP disabled_metrics_total [BETA] The count of disabled metrics. 
# TYPE disabled_metrics_total counter disabled_metrics_total 0 @@ -175,277 +178,352 @@ disabled_metrics_total 0 go_cgo_go_to_c_calls_calls_total 0 # HELP go_cpu_classes_gc_mark_assist_cpu_seconds_total Estimated total CPU time goroutines spent performing GC tasks to assist the GC and prevent it from falling behind the application. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_gc_mark_assist_cpu_seconds_total counter -go_cpu_classes_gc_mark_assist_cpu_seconds_total 0.002014685 -# HELP go_cpu_classes_gc_mark_dedicated_cpu_seconds_total Estimated total CPU time spent performing GC tasks on processors (as defined by GOMAXPROCS) dedicated to those tasks. This includes time spent with the world stopped due to the GC. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. +go_cpu_classes_gc_mark_assist_cpu_seconds_total 0.011744296 +# HELP go_cpu_classes_gc_mark_dedicated_cpu_seconds_total Estimated total CPU time spent performing GC tasks on processors (as defined by GOMAXPROCS) dedicated to those tasks. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_gc_mark_dedicated_cpu_seconds_total counter -go_cpu_classes_gc_mark_dedicated_cpu_seconds_total 0.055565699 +go_cpu_classes_gc_mark_dedicated_cpu_seconds_total 0.283499396 # HELP go_cpu_classes_gc_mark_idle_cpu_seconds_total Estimated total CPU time spent performing GC tasks on spare CPU resources that the Go scheduler could not otherwise find a use for. This should be subtracted from the total GC CPU time to obtain a measure of compulsory GC CPU time. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. 
# TYPE go_cpu_classes_gc_mark_idle_cpu_seconds_total counter -go_cpu_classes_gc_mark_idle_cpu_seconds_total 0.014884382 +go_cpu_classes_gc_mark_idle_cpu_seconds_total 0.030548669 # HELP go_cpu_classes_gc_pause_cpu_seconds_total Estimated total CPU time spent with the application paused by the GC. Even if only one thread is running during the pause, this is computed as GOMAXPROCS times the pause latency because nothing else can be executing. This is the exact sum of samples in /gc/pause:seconds if each sample is multiplied by GOMAXPROCS at the time it is taken. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_gc_pause_cpu_seconds_total counter -go_cpu_classes_gc_pause_cpu_seconds_total 0.013685448 +go_cpu_classes_gc_pause_cpu_seconds_total 0.093005136 # HELP go_cpu_classes_gc_total_cpu_seconds_total Estimated total CPU time spent performing GC tasks. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. Sum of all metrics in /cpu/classes/gc. # TYPE go_cpu_classes_gc_total_cpu_seconds_total counter -go_cpu_classes_gc_total_cpu_seconds_total 0.086150214 +go_cpu_classes_gc_total_cpu_seconds_total 0.418797497 # HELP go_cpu_classes_idle_cpu_seconds_total Estimated total available CPU time not spent executing any Go or Go runtime code. In other words, the part of /cpu/classes/total:cpu-seconds that was unused. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. 
# TYPE go_cpu_classes_idle_cpu_seconds_total counter -go_cpu_classes_idle_cpu_seconds_total 2708.20701688 +go_cpu_classes_idle_cpu_seconds_total 37095.656235078 # HELP go_cpu_classes_scavenge_assist_cpu_seconds_total Estimated total CPU time spent returning unused memory to the underlying platform in response eagerly in response to memory pressure. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_scavenge_assist_cpu_seconds_total counter -go_cpu_classes_scavenge_assist_cpu_seconds_total 3.91e-07 +go_cpu_classes_scavenge_assist_cpu_seconds_total 3.86e-07 # HELP go_cpu_classes_scavenge_background_cpu_seconds_total Estimated total CPU time spent performing background tasks to return unused memory to the underlying platform. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_scavenge_background_cpu_seconds_total counter -go_cpu_classes_scavenge_background_cpu_seconds_total 1.29e-07 +go_cpu_classes_scavenge_background_cpu_seconds_total 0.000335459 # HELP go_cpu_classes_scavenge_total_cpu_seconds_total Estimated total CPU time spent performing tasks that return unused memory to the underlying platform. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. Sum of all metrics in /cpu/classes/scavenge. # TYPE go_cpu_classes_scavenge_total_cpu_seconds_total counter -go_cpu_classes_scavenge_total_cpu_seconds_total 5.2e-07 +go_cpu_classes_scavenge_total_cpu_seconds_total 0.000335845 # HELP go_cpu_classes_total_cpu_seconds_total Estimated total available CPU time for user Go code or the Go runtime, as defined by GOMAXPROCS. In other words, GOMAXPROCS integrated over the wall-clock duration this process has been executing for. 
This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. Sum of all metrics in /cpu/classes. # TYPE go_cpu_classes_total_cpu_seconds_total counter -go_cpu_classes_total_cpu_seconds_total 3095.81241836 +go_cpu_classes_total_cpu_seconds_total 37106.78391232 # HELP go_cpu_classes_user_cpu_seconds_total Estimated total CPU time spent running user Go code. This may also include some small amount of time spent in the Go runtime. This metric is an overestimate, and not directly comparable to system CPU time measurements. Compare only with other /cpu/classes metrics. # TYPE go_cpu_classes_user_cpu_seconds_total counter -go_cpu_classes_user_cpu_seconds_total 387.519250746 +go_cpu_classes_user_cpu_seconds_total 10.7085439 # HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime. # TYPE go_gc_cycles_automatic_gc_cycles_total counter -go_gc_cycles_automatic_gc_cycles_total 12 +go_gc_cycles_automatic_gc_cycles_total 29 # HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application. # TYPE go_gc_cycles_forced_gc_cycles_total counter go_gc_cycles_forced_gc_cycles_total 0 # HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles. # TYPE go_gc_cycles_total_gc_cycles_total counter -go_gc_cycles_total_gc_cycles_total 12 +go_gc_cycles_total_gc_cycles_total 29 # HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. # TYPE go_gc_duration_seconds summary -go_gc_duration_seconds{quantile="0"} 4.9982e-05 -go_gc_duration_seconds{quantile="0.25"} 8.823e-05 -go_gc_duration_seconds{quantile="0.5"} 0.000100372 -go_gc_duration_seconds{quantile="0.75"} 0.000243385 -go_gc_duration_seconds{quantile="1"} 0.000299856 -go_gc_duration_seconds_sum 0.001710681 -go_gc_duration_seconds_count 12 -# HELP go_gc_heap_allocs_by_size_bytes Distribution of heap allocations by approximate size. 
Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. +go_gc_duration_seconds{quantile="0"} 7.2067e-05 +go_gc_duration_seconds{quantile="0.25"} 8.6777e-05 +go_gc_duration_seconds{quantile="0.5"} 0.000115853 +go_gc_duration_seconds{quantile="0.75"} 0.000222016 +go_gc_duration_seconds{quantile="1"} 0.000744815 +go_gc_duration_seconds_sum 0.005812821 +go_gc_duration_seconds_count 29 +# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. +# TYPE go_gc_gogc_percent gauge +go_gc_gogc_percent 100 +# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. +# TYPE go_gc_gomemlimit_bytes gauge +go_gc_gomemlimit_bytes 9.223372036854776e+18 +# HELP go_gc_heap_allocs_by_size_bytes Distribution of heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
# TYPE go_gc_heap_allocs_by_size_bytes histogram -go_gc_heap_allocs_by_size_bytes_bucket{le="8.999999999999998"} 6569 -go_gc_heap_allocs_by_size_bytes_bucket{le="24.999999999999996"} 102581 -go_gc_heap_allocs_by_size_bytes_bucket{le="64.99999999999999"} 185096 -go_gc_heap_allocs_by_size_bytes_bucket{le="144.99999999999997"} 246667 -go_gc_heap_allocs_by_size_bytes_bucket{le="320.99999999999994"} 278923 -go_gc_heap_allocs_by_size_bytes_bucket{le="704.9999999999999"} 286024 -go_gc_heap_allocs_by_size_bytes_bucket{le="1536.9999999999998"} 287739 -go_gc_heap_allocs_by_size_bytes_bucket{le="3200.9999999999995"} 288395 -go_gc_heap_allocs_by_size_bytes_bucket{le="6528.999999999999"} 289332 -go_gc_heap_allocs_by_size_bytes_bucket{le="13568.999999999998"} 289499 -go_gc_heap_allocs_by_size_bytes_bucket{le="27264.999999999996"} 289550 -go_gc_heap_allocs_by_size_bytes_bucket{le="+Inf"} 289601 -go_gc_heap_allocs_by_size_bytes_sum 3.6535336e+07 -go_gc_heap_allocs_by_size_bytes_count 289601 +go_gc_heap_allocs_by_size_bytes_bucket{le="8.999999999999998"} 11502 +go_gc_heap_allocs_by_size_bytes_bucket{le="24.999999999999996"} 337627 +go_gc_heap_allocs_by_size_bytes_bucket{le="64.99999999999999"} 668021 +go_gc_heap_allocs_by_size_bytes_bucket{le="144.99999999999997"} 912014 +go_gc_heap_allocs_by_size_bytes_bucket{le="320.99999999999994"} 986135 +go_gc_heap_allocs_by_size_bytes_bucket{le="704.9999999999999"} 1.011816e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="1536.9999999999998"} 1.016066e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="3200.9999999999995"} 1.017619e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="6528.999999999999"} 1.018929e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="13568.999999999998"} 1.019088e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="27264.999999999996"} 1.019141e+06 +go_gc_heap_allocs_by_size_bytes_bucket{le="+Inf"} 1.019191e+06 +go_gc_heap_allocs_by_size_bytes_sum 9.52492e+07 +go_gc_heap_allocs_by_size_bytes_count 1.019191e+06 # HELP 
go_gc_heap_allocs_bytes_total Cumulative sum of memory allocated to the heap by the application. # TYPE go_gc_heap_allocs_bytes_total counter -go_gc_heap_allocs_bytes_total 3.6535336e+07 +go_gc_heap_allocs_bytes_total 9.52492e+07 # HELP go_gc_heap_allocs_objects_total Cumulative count of heap allocations triggered by the application. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. # TYPE go_gc_heap_allocs_objects_total counter -go_gc_heap_allocs_objects_total 289601 -# HELP go_gc_heap_frees_by_size_bytes Distribution of freed heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. +go_gc_heap_allocs_objects_total 1.019191e+06 +# HELP go_gc_heap_frees_by_size_bytes Distribution of freed heap allocations by approximate size. Bucket counts increase monotonically. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. 
# TYPE go_gc_heap_frees_by_size_bytes histogram -go_gc_heap_frees_by_size_bytes_bucket{le="8.999999999999998"} 4038 -go_gc_heap_frees_by_size_bytes_bucket{le="24.999999999999996"} 85701 -go_gc_heap_frees_by_size_bytes_bucket{le="64.99999999999999"} 158104 -go_gc_heap_frees_by_size_bytes_bucket{le="144.99999999999997"} 214533 -go_gc_heap_frees_by_size_bytes_bucket{le="320.99999999999994"} 244063 -go_gc_heap_frees_by_size_bytes_bucket{le="704.9999999999999"} 249989 -go_gc_heap_frees_by_size_bytes_bucket{le="1536.9999999999998"} 251384 -go_gc_heap_frees_by_size_bytes_bucket{le="3200.9999999999995"} 251831 -go_gc_heap_frees_by_size_bytes_bucket{le="6528.999999999999"} 252669 -go_gc_heap_frees_by_size_bytes_bucket{le="13568.999999999998"} 252780 -go_gc_heap_frees_by_size_bytes_bucket{le="27264.999999999996"} 252796 -go_gc_heap_frees_by_size_bytes_bucket{le="+Inf"} 252833 -go_gc_heap_frees_by_size_bytes_sum 3.034272e+07 -go_gc_heap_frees_by_size_bytes_count 252833 +go_gc_heap_frees_by_size_bytes_bucket{le="8.999999999999998"} 8827 +go_gc_heap_frees_by_size_bytes_bucket{le="24.999999999999996"} 309924 +go_gc_heap_frees_by_size_bytes_bucket{le="64.99999999999999"} 616835 +go_gc_heap_frees_by_size_bytes_bucket{le="144.99999999999997"} 845511 +go_gc_heap_frees_by_size_bytes_bucket{le="320.99999999999994"} 915369 +go_gc_heap_frees_by_size_bytes_bucket{le="704.9999999999999"} 939023 +go_gc_heap_frees_by_size_bytes_bucket{le="1536.9999999999998"} 942831 +go_gc_heap_frees_by_size_bytes_bucket{le="3200.9999999999995"} 944135 +go_gc_heap_frees_by_size_bytes_bucket{le="6528.999999999999"} 945346 +go_gc_heap_frees_by_size_bytes_bucket{le="13568.999999999998"} 945432 +go_gc_heap_frees_by_size_bytes_bucket{le="27264.999999999996"} 945449 +go_gc_heap_frees_by_size_bytes_bucket{le="+Inf"} 945485 +go_gc_heap_frees_by_size_bytes_sum 8.6091296e+07 +go_gc_heap_frees_by_size_bytes_count 945485 # HELP go_gc_heap_frees_bytes_total Cumulative sum of heap memory freed by the garbage collector. 
# TYPE go_gc_heap_frees_bytes_total counter -go_gc_heap_frees_bytes_total 3.034272e+07 +go_gc_heap_frees_bytes_total 8.6091296e+07 # HELP go_gc_heap_frees_objects_total Cumulative count of heap allocations whose storage was freed by the garbage collector. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks. # TYPE go_gc_heap_frees_objects_total counter -go_gc_heap_frees_objects_total 252833 +go_gc_heap_frees_objects_total 945485 # HELP go_gc_heap_goal_bytes Heap size target for the end of the GC cycle. # TYPE go_gc_heap_goal_bytes gauge -go_gc_heap_goal_bytes 1.2824032e+07 +go_gc_heap_goal_bytes 1.3477768e+07 +# HELP go_gc_heap_live_bytes Heap memory occupied by live objects that were marked by the previous GC. +# TYPE go_gc_heap_live_bytes gauge +go_gc_heap_live_bytes 6.428656e+06 # HELP go_gc_heap_objects_objects Number of objects, live or unswept, occupying heap memory. # TYPE go_gc_heap_objects_objects gauge -go_gc_heap_objects_objects 36768 +go_gc_heap_objects_objects 73706 # HELP go_gc_heap_tiny_allocs_objects_total Count of small allocations that are packed together into blocks. These allocations are counted separately from other allocations because each individual allocation is not tracked by the runtime, only their block. Each block is already accounted for in allocs-by-size and frees-by-size. # TYPE go_gc_heap_tiny_allocs_objects_total counter -go_gc_heap_tiny_allocs_objects_total 24964 +go_gc_heap_tiny_allocs_objects_total 94953 # HELP go_gc_limiter_last_enabled_gc_cycle GC cycle the last time the GC CPU limiter was enabled. This metric is useful for diagnosing the root cause of an out-of-memory error, because the limiter trades memory for CPU time when the GC's CPU time gets too high. This is most likely to occur with use of SetMemoryLimit. The first GC cycle is cycle 1, so a value of 0 indicates that it was never enabled. 
# TYPE go_gc_limiter_last_enabled_gc_cycle gauge go_gc_limiter_last_enabled_gc_cycle 0 -# HELP go_gc_pauses_seconds Distribution individual GC-related stop-the-world pause latencies. +# HELP go_gc_pauses_seconds Distribution of individual GC-related stop-the-world pause latencies. Bucket counts increase monotonically. # TYPE go_gc_pauses_seconds histogram go_gc_pauses_seconds_bucket{le="6.399999999999999e-08"} 0 go_gc_pauses_seconds_bucket{le="6.399999999999999e-07"} 0 -go_gc_pauses_seconds_bucket{le="7.167999999999999e-06"} 4 -go_gc_pauses_seconds_bucket{le="8.191999999999999e-05"} 18 -go_gc_pauses_seconds_bucket{le="0.0009175039999999999"} 24 -go_gc_pauses_seconds_bucket{le="0.010485759999999998"} 24 -go_gc_pauses_seconds_bucket{le="0.11744051199999998"} 24 -go_gc_pauses_seconds_bucket{le="+Inf"} 24 -go_gc_pauses_seconds_sum 0.0005944320000000001 -go_gc_pauses_seconds_count 24 +go_gc_pauses_seconds_bucket{le="7.167999999999999e-06"} 1 +go_gc_pauses_seconds_bucket{le="8.191999999999999e-05"} 40 +go_gc_pauses_seconds_bucket{le="0.0009175039999999999"} 58 +go_gc_pauses_seconds_bucket{le="0.010485759999999998"} 58 +go_gc_pauses_seconds_bucket{le="0.11744051199999998"} 58 +go_gc_pauses_seconds_bucket{le="+Inf"} 58 +go_gc_pauses_seconds_sum 0.001754752 +go_gc_pauses_seconds_count 58 +# HELP go_gc_scan_globals_bytes The total amount of global variable space that is scannable. +# TYPE go_gc_scan_globals_bytes gauge +go_gc_scan_globals_bytes 453768 +# HELP go_gc_scan_heap_bytes The total amount of heap space that is scannable. +# TYPE go_gc_scan_heap_bytes gauge +go_gc_scan_heap_bytes 6.264384e+06 +# HELP go_gc_scan_stack_bytes The number of bytes of stack that were scanned last GC cycle. +# TYPE go_gc_scan_stack_bytes gauge +go_gc_scan_stack_bytes 166688 +# HELP go_gc_scan_total_bytes The total amount space that is scannable. Sum of all metrics in /gc/scan. 
+# TYPE go_gc_scan_total_bytes gauge +go_gc_scan_total_bytes 6.88484e+06 # HELP go_gc_stack_starting_size_bytes The stack size of new goroutines. # TYPE go_gc_stack_starting_size_bytes gauge go_gc_stack_starting_size_bytes 2048 +# HELP go_godebug_non_default_behavior_execerrdot_events_total The number of non-default behaviors executed by the os/exec package due to a non-default GODEBUG=execerrdot=... setting. +# TYPE go_godebug_non_default_behavior_execerrdot_events_total counter +go_godebug_non_default_behavior_execerrdot_events_total 0 +# HELP go_godebug_non_default_behavior_gocachehash_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocachehash=... setting. +# TYPE go_godebug_non_default_behavior_gocachehash_events_total counter +go_godebug_non_default_behavior_gocachehash_events_total 0 +# HELP go_godebug_non_default_behavior_gocachetest_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocachetest=... setting. +# TYPE go_godebug_non_default_behavior_gocachetest_events_total counter +go_godebug_non_default_behavior_gocachetest_events_total 0 +# HELP go_godebug_non_default_behavior_gocacheverify_events_total The number of non-default behaviors executed by the cmd/go package due to a non-default GODEBUG=gocacheverify=... setting. +# TYPE go_godebug_non_default_behavior_gocacheverify_events_total counter +go_godebug_non_default_behavior_gocacheverify_events_total 0 +# HELP go_godebug_non_default_behavior_http2client_events_total The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2client=... setting. 
+# TYPE go_godebug_non_default_behavior_http2client_events_total counter +go_godebug_non_default_behavior_http2client_events_total 0 +# HELP go_godebug_non_default_behavior_http2server_events_total The number of non-default behaviors executed by the net/http package due to a non-default GODEBUG=http2server=... setting. +# TYPE go_godebug_non_default_behavior_http2server_events_total counter +go_godebug_non_default_behavior_http2server_events_total 0 +# HELP go_godebug_non_default_behavior_installgoroot_events_total The number of non-default behaviors executed by the go/build package due to a non-default GODEBUG=installgoroot=... setting. +# TYPE go_godebug_non_default_behavior_installgoroot_events_total counter +go_godebug_non_default_behavior_installgoroot_events_total 0 +# HELP go_godebug_non_default_behavior_jstmpllitinterp_events_total The number of non-default behaviors executed by the html/template package due to a non-default GODEBUG=jstmpllitinterp=... setting. +# TYPE go_godebug_non_default_behavior_jstmpllitinterp_events_total counter +go_godebug_non_default_behavior_jstmpllitinterp_events_total 0 +# HELP go_godebug_non_default_behavior_multipartmaxheaders_events_total The number of non-default behaviors executed by the mime/multipart package due to a non-default GODEBUG=multipartmaxheaders=... setting. +# TYPE go_godebug_non_default_behavior_multipartmaxheaders_events_total counter +go_godebug_non_default_behavior_multipartmaxheaders_events_total 0 +# HELP go_godebug_non_default_behavior_multipartmaxparts_events_total The number of non-default behaviors executed by the mime/multipart package due to a non-default GODEBUG=multipartmaxparts=... setting. 
+# TYPE go_godebug_non_default_behavior_multipartmaxparts_events_total counter +go_godebug_non_default_behavior_multipartmaxparts_events_total 0 +# HELP go_godebug_non_default_behavior_multipathtcp_events_total The number of non-default behaviors executed by the net package due to a non-default GODEBUG=multipathtcp=... setting. +# TYPE go_godebug_non_default_behavior_multipathtcp_events_total counter +go_godebug_non_default_behavior_multipathtcp_events_total 0 +# HELP go_godebug_non_default_behavior_panicnil_events_total The number of non-default behaviors executed by the runtime package due to a non-default GODEBUG=panicnil=... setting. +# TYPE go_godebug_non_default_behavior_panicnil_events_total counter +go_godebug_non_default_behavior_panicnil_events_total 0 +# HELP go_godebug_non_default_behavior_randautoseed_events_total The number of non-default behaviors executed by the math/rand package due to a non-default GODEBUG=randautoseed=... setting. +# TYPE go_godebug_non_default_behavior_randautoseed_events_total counter +go_godebug_non_default_behavior_randautoseed_events_total 0 +# HELP go_godebug_non_default_behavior_tarinsecurepath_events_total The number of non-default behaviors executed by the archive/tar package due to a non-default GODEBUG=tarinsecurepath=... setting. +# TYPE go_godebug_non_default_behavior_tarinsecurepath_events_total counter +go_godebug_non_default_behavior_tarinsecurepath_events_total 0 +# HELP go_godebug_non_default_behavior_tlsmaxrsasize_events_total The number of non-default behaviors executed by the crypto/tls package due to a non-default GODEBUG=tlsmaxrsasize=... setting. +# TYPE go_godebug_non_default_behavior_tlsmaxrsasize_events_total counter +go_godebug_non_default_behavior_tlsmaxrsasize_events_total 0 +# HELP go_godebug_non_default_behavior_x509sha1_events_total The number of non-default behaviors executed by the crypto/x509 package due to a non-default GODEBUG=x509sha1=... setting. 
+# TYPE go_godebug_non_default_behavior_x509sha1_events_total counter +go_godebug_non_default_behavior_x509sha1_events_total 0 +# HELP go_godebug_non_default_behavior_x509usefallbackroots_events_total The number of non-default behaviors executed by the crypto/x509 package due to a non-default GODEBUG=x509usefallbackroots=... setting. +# TYPE go_godebug_non_default_behavior_x509usefallbackroots_events_total counter +go_godebug_non_default_behavior_x509usefallbackroots_events_total 0 +# HELP go_godebug_non_default_behavior_zipinsecurepath_events_total The number of non-default behaviors executed by the archive/zip package due to a non-default GODEBUG=zipinsecurepath=... setting. +# TYPE go_godebug_non_default_behavior_zipinsecurepath_events_total counter +go_godebug_non_default_behavior_zipinsecurepath_events_total 0 # HELP go_goroutines Number of goroutines that currently exist. # TYPE go_goroutines gauge go_goroutines 172 # HELP go_info Information about the Go environment. # TYPE go_info gauge -go_info{version="go1.20.7"} 1 +go_info{version="go1.21.5"} 1 # HELP go_memory_classes_heap_free_bytes Memory that is completely free and eligible to be returned to the underlying system, but has not been. This metric is the runtime's estimate of free address space that is backed by physical memory. # TYPE go_memory_classes_heap_free_bytes gauge -go_memory_classes_heap_free_bytes 2.41664e+06 +go_memory_classes_heap_free_bytes 1.081344e+06 # HELP go_memory_classes_heap_objects_bytes Memory occupied by live objects and dead objects that have not yet been marked free by the garbage collector. # TYPE go_memory_classes_heap_objects_bytes gauge -go_memory_classes_heap_objects_bytes 6.192616e+06 +go_memory_classes_heap_objects_bytes 9.157904e+06 # HELP go_memory_classes_heap_released_bytes Memory that is completely free and has been returned to the underlying system. 
This metric is the runtime's estimate of free address space that is still mapped into the process, but is not backed by physical memory. # TYPE go_memory_classes_heap_released_bytes gauge -go_memory_classes_heap_released_bytes 1.810432e+06 -# HELP go_memory_classes_heap_stacks_bytes Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use. +go_memory_classes_heap_released_bytes 1.179648e+06 +# HELP go_memory_classes_heap_stacks_bytes Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use. Currently, this represents all stack memory for goroutines. It also includes all OS thread stacks in non-cgo programs. Note that stacks may be allocated differently in the future, and this may change. # TYPE go_memory_classes_heap_stacks_bytes gauge -go_memory_classes_heap_stacks_bytes 1.507328e+06 +go_memory_classes_heap_stacks_bytes 2.064384e+06 # HELP go_memory_classes_heap_unused_bytes Memory that is reserved for heap objects but is not currently used to hold heap objects. # TYPE go_memory_classes_heap_unused_bytes gauge -go_memory_classes_heap_unused_bytes 4.8502e+06 +go_memory_classes_heap_unused_bytes 3.293936e+06 # HELP go_memory_classes_metadata_mcache_free_bytes Memory that is reserved for runtime mcache structures, but not in-use. # TYPE go_memory_classes_metadata_mcache_free_bytes gauge -go_memory_classes_metadata_mcache_free_bytes 6000 +go_memory_classes_metadata_mcache_free_bytes 12000 # HELP go_memory_classes_metadata_mcache_inuse_bytes Memory that is occupied by runtime mcache structures that are currently being used. # TYPE go_memory_classes_metadata_mcache_inuse_bytes gauge -go_memory_classes_metadata_mcache_inuse_bytes 9600 +go_memory_classes_metadata_mcache_inuse_bytes 19200 # HELP go_memory_classes_metadata_mspan_free_bytes Memory that is reserved for runtime mspan structures, but not in-use. 
# TYPE go_memory_classes_metadata_mspan_free_bytes gauge -go_memory_classes_metadata_mspan_free_bytes 30560 +go_memory_classes_metadata_mspan_free_bytes 18984 # HELP go_memory_classes_metadata_mspan_inuse_bytes Memory that is occupied by runtime mspan structures that are currently being used. # TYPE go_memory_classes_metadata_mspan_inuse_bytes gauge -go_memory_classes_metadata_mspan_inuse_bytes 230560 +go_memory_classes_metadata_mspan_inuse_bytes 339528 # HELP go_memory_classes_metadata_other_bytes Memory that is reserved for or used to hold runtime metadata. # TYPE go_memory_classes_metadata_other_bytes gauge -go_memory_classes_metadata_other_bytes 8.977888e+06 -# HELP go_memory_classes_os_stacks_bytes Stack memory allocated by the underlying operating system. +go_memory_classes_metadata_other_bytes 4.734312e+06 +# HELP go_memory_classes_os_stacks_bytes Stack memory allocated by the underlying operating system. In non-cgo programs this metric is currently zero. This may change in the future.In cgo programs this metric includes OS thread stacks allocated directly from the OS. Currently, this only accounts for one stack in c-shared and c-archive build modes, and other sources of stacks from the OS are not measured. This too may change in the future. # TYPE go_memory_classes_os_stacks_bytes gauge go_memory_classes_os_stacks_bytes 0 # HELP go_memory_classes_other_bytes Memory used by execution trace buffers, structures for debugging the runtime, finalizer and profiler specials, and more. # TYPE go_memory_classes_other_bytes gauge -go_memory_classes_other_bytes 1.702081e+06 +go_memory_classes_other_bytes 2.379195e+06 # HELP go_memory_classes_profiling_buckets_bytes Memory that is used by the stack trace hash map used for profiling. 
# TYPE go_memory_classes_profiling_buckets_bytes gauge -go_memory_classes_profiling_buckets_bytes 1.492343e+06 +go_memory_classes_profiling_buckets_bytes 1.513365e+06 # HELP go_memory_classes_total_bytes All memory mapped by the Go runtime into the current process as read-write. Note that this does not include memory mapped by code called via cgo or via the syscall package. Sum of all metrics in /memory/classes. # TYPE go_memory_classes_total_bytes gauge -go_memory_classes_total_bytes 2.9226248e+07 +go_memory_classes_total_bytes 2.57938e+07 # HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. # TYPE go_memstats_alloc_bytes gauge -go_memstats_alloc_bytes 6.192616e+06 +go_memstats_alloc_bytes 9.157904e+06 # HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. # TYPE go_memstats_alloc_bytes_total counter -go_memstats_alloc_bytes_total 3.6535336e+07 +go_memstats_alloc_bytes_total 9.52492e+07 # HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. # TYPE go_memstats_buck_hash_sys_bytes gauge -go_memstats_buck_hash_sys_bytes 1.492343e+06 +go_memstats_buck_hash_sys_bytes 1.513365e+06 # HELP go_memstats_frees_total Total number of frees. # TYPE go_memstats_frees_total counter -go_memstats_frees_total 277797 +go_memstats_frees_total 1.040438e+06 # HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. # TYPE go_memstats_gc_sys_bytes gauge -go_memstats_gc_sys_bytes 8.977888e+06 +go_memstats_gc_sys_bytes 4.734312e+06 # HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. # TYPE go_memstats_heap_alloc_bytes gauge -go_memstats_heap_alloc_bytes 6.192616e+06 +go_memstats_heap_alloc_bytes 9.157904e+06 # HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. 
# TYPE go_memstats_heap_idle_bytes gauge -go_memstats_heap_idle_bytes 4.227072e+06 +go_memstats_heap_idle_bytes 2.260992e+06 # HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. # TYPE go_memstats_heap_inuse_bytes gauge -go_memstats_heap_inuse_bytes 1.1042816e+07 +go_memstats_heap_inuse_bytes 1.245184e+07 # HELP go_memstats_heap_objects Number of allocated objects. # TYPE go_memstats_heap_objects gauge -go_memstats_heap_objects 36768 +go_memstats_heap_objects 73706 # HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. # TYPE go_memstats_heap_released_bytes gauge -go_memstats_heap_released_bytes 1.810432e+06 +go_memstats_heap_released_bytes 1.179648e+06 # HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. # TYPE go_memstats_heap_sys_bytes gauge -go_memstats_heap_sys_bytes 1.5269888e+07 +go_memstats_heap_sys_bytes 1.4712832e+07 # HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. # TYPE go_memstats_last_gc_time_seconds gauge -go_memstats_last_gc_time_seconds 1.698752772363916e+09 +go_memstats_last_gc_time_seconds 1.7048970868727124e+09 # HELP go_memstats_lookups_total Total number of pointer lookups. # TYPE go_memstats_lookups_total counter go_memstats_lookups_total 0 # HELP go_memstats_mallocs_total Total number of mallocs. # TYPE go_memstats_mallocs_total counter -go_memstats_mallocs_total 314565 +go_memstats_mallocs_total 1.114144e+06 # HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. # TYPE go_memstats_mcache_inuse_bytes gauge -go_memstats_mcache_inuse_bytes 9600 +go_memstats_mcache_inuse_bytes 19200 # HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. # TYPE go_memstats_mcache_sys_bytes gauge -go_memstats_mcache_sys_bytes 15600 +go_memstats_mcache_sys_bytes 31200 # HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. 
# TYPE go_memstats_mspan_inuse_bytes gauge -go_memstats_mspan_inuse_bytes 230560 +go_memstats_mspan_inuse_bytes 339528 # HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. # TYPE go_memstats_mspan_sys_bytes gauge -go_memstats_mspan_sys_bytes 261120 +go_memstats_mspan_sys_bytes 358512 # HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. # TYPE go_memstats_next_gc_bytes gauge -go_memstats_next_gc_bytes 1.2824032e+07 +go_memstats_next_gc_bytes 1.3477768e+07 # HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. # TYPE go_memstats_other_sys_bytes gauge -go_memstats_other_sys_bytes 1.702081e+06 +go_memstats_other_sys_bytes 2.379195e+06 # HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. # TYPE go_memstats_stack_inuse_bytes gauge -go_memstats_stack_inuse_bytes 1.507328e+06 +go_memstats_stack_inuse_bytes 2.064384e+06 # HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. # TYPE go_memstats_stack_sys_bytes gauge -go_memstats_stack_sys_bytes 1.507328e+06 +go_memstats_stack_sys_bytes 2.064384e+06 # HELP go_memstats_sys_bytes Number of bytes obtained from system. # TYPE go_memstats_sys_bytes gauge -go_memstats_sys_bytes 2.9226248e+07 +go_memstats_sys_bytes 2.57938e+07 # HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. # TYPE go_sched_gomaxprocs_threads gauge -go_sched_gomaxprocs_threads 8 +go_sched_gomaxprocs_threads 16 # HELP go_sched_goroutines_goroutines Count of live goroutines. # TYPE go_sched_goroutines_goroutines gauge go_sched_goroutines_goroutines 172 -# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. 
+# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running. Bucket counts increase monotonically. # TYPE go_sched_latencies_seconds histogram -go_sched_latencies_seconds_bucket{le="6.399999999999999e-08"} 902 -go_sched_latencies_seconds_bucket{le="6.399999999999999e-07"} 985 -go_sched_latencies_seconds_bucket{le="7.167999999999999e-06"} 1409 -go_sched_latencies_seconds_bucket{le="8.191999999999999e-05"} 2335 -go_sched_latencies_seconds_bucket{le="0.0009175039999999999"} 2440 -go_sched_latencies_seconds_bucket{le="0.010485759999999998"} 2446 -go_sched_latencies_seconds_bucket{le="0.11744051199999998"} 2446 -go_sched_latencies_seconds_bucket{le="+Inf"} 2446 -go_sched_latencies_seconds_sum 0.021020864 -go_sched_latencies_seconds_count 2446 +go_sched_latencies_seconds_bucket{le="6.399999999999999e-08"} 3704 +go_sched_latencies_seconds_bucket{le="6.399999999999999e-07"} 3873 +go_sched_latencies_seconds_bucket{le="7.167999999999999e-06"} 6018 +go_sched_latencies_seconds_bucket{le="8.191999999999999e-05"} 11585 +go_sched_latencies_seconds_bucket{le="0.0009175039999999999"} 12286 +go_sched_latencies_seconds_bucket{le="0.010485759999999998"} 12292 +go_sched_latencies_seconds_bucket{le="0.11744051199999998"} 12292 +go_sched_latencies_seconds_bucket{le="+Inf"} 12292 +go_sched_latencies_seconds_sum 0.10421881599999999 +go_sched_latencies_seconds_count 12292 # HELP go_sync_mutex_wait_total_seconds_total Approximate cumulative time goroutines have spent blocked on a sync.Mutex or sync.RWMutex. This metric is useful for identifying global changes in lock contention. Collect a mutex or block profile using the runtime/pprof package for more detailed contention data. # TYPE go_sync_mutex_wait_total_seconds_total counter -go_sync_mutex_wait_total_seconds_total 0.03976712 +go_sync_mutex_wait_total_seconds_total 0 # HELP go_threads Number of OS threads created. 
# TYPE go_threads gauge -go_threads 12 +go_threads 19 # HELP hidden_metrics_total [BETA] The count of hidden metrics. # TYPE hidden_metrics_total counter -hidden_metrics_total 0 +hidden_metrics_total 1 # HELP kubernetes_build_info [ALPHA] A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running. # TYPE kubernetes_build_info gauge -kubernetes_build_info{build_date="2023-08-15T21:24:51Z",compiler="gc",git_commit="855e7c48de7388eb330da0f8d9d2394ee818fb8d",git_tree_state="clean",git_version="v1.28.0",go_version="go1.20.7",major="1",minor="28",platform="linux/amd64"} 1 +kubernetes_build_info{build_date="2023-12-14T19:18:17Z",compiler="gc",git_commit="3f7a50f38688eb332e2a1b013678c6435d539ae6",git_tree_state="clean",git_version="v1.29.0",go_version="go1.21.5",major="1",minor="29",platform="linux/amd64"} 1 # HELP kubernetes_feature_enabled [BETA] This metric records the data about the stage and enablement of a k8s feature. 
# TYPE kubernetes_feature_enabled gauge -kubernetes_feature_enabled{name="APIListChunking",stage="BETA"} 1 -kubernetes_feature_enabled{name="APIPriorityAndFairness",stage="BETA"} 1 +kubernetes_feature_enabled{name="APIListChunking",stage=""} 1 +kubernetes_feature_enabled{name="APIPriorityAndFairness",stage=""} 1 kubernetes_feature_enabled{name="APIResponseCompression",stage="BETA"} 1 kubernetes_feature_enabled{name="APISelfSubjectReview",stage=""} 1 kubernetes_feature_enabled{name="APIServerIdentity",stage="BETA"} 1 @@ -454,6 +532,7 @@ kubernetes_feature_enabled{name="AdmissionWebhookMatchConditions",stage="BETA"} kubernetes_feature_enabled{name="AggregatedDiscoveryEndpoint",stage="BETA"} 1 kubernetes_feature_enabled{name="AllAlpha",stage="ALPHA"} 0 kubernetes_feature_enabled{name="AllBeta",stage="BETA"} 0 +kubernetes_feature_enabled{name="AllowServiceLBStatusOnNonLB",stage="DEPRECATED"} 0 kubernetes_feature_enabled{name="AnyVolumeDataSource",stage="BETA"} 1 kubernetes_feature_enabled{name="AppArmor",stage="BETA"} 1 kubernetes_feature_enabled{name="CPUManager",stage=""} 1 @@ -464,27 +543,26 @@ kubernetes_feature_enabled{name="CRDValidationRatcheting",stage="ALPHA"} 0 kubernetes_feature_enabled{name="CSIMigrationAzureFile",stage=""} 1 kubernetes_feature_enabled{name="CSIMigrationPortworx",stage="BETA"} 0 kubernetes_feature_enabled{name="CSIMigrationRBD",stage="DEPRECATED"} 0 -kubernetes_feature_enabled{name="CSIMigrationvSphere",stage=""} 1 -kubernetes_feature_enabled{name="CSINodeExpandSecret",stage="BETA"} 1 +kubernetes_feature_enabled{name="CSINodeExpandSecret",stage=""} 1 kubernetes_feature_enabled{name="CSIVolumeHealth",stage="ALPHA"} 0 kubernetes_feature_enabled{name="CloudControllerManagerWebhook",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="CloudDualStackNodeIPs",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="CloudDualStackNodeIPs",stage="BETA"} 1 kubernetes_feature_enabled{name="ClusterTrustBundle",stage="ALPHA"} 0 
+kubernetes_feature_enabled{name="ClusterTrustBundleProjection",stage="ALPHA"} 0 kubernetes_feature_enabled{name="ComponentSLIs",stage="BETA"} 1 kubernetes_feature_enabled{name="ConsistentHTTPGetHandlers",stage=""} 1 kubernetes_feature_enabled{name="ConsistentListFromCache",stage="ALPHA"} 0 kubernetes_feature_enabled{name="ContainerCheckpoint",stage="ALPHA"} 0 kubernetes_feature_enabled{name="ContextualLogging",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="CronJobTimeZone",stage=""} 1 kubernetes_feature_enabled{name="CronJobsScheduledAnnotation",stage="BETA"} 1 kubernetes_feature_enabled{name="CrossNamespaceVolumeDataSource",stage="ALPHA"} 0 kubernetes_feature_enabled{name="CustomCPUCFSQuotaPeriod",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="CustomResourceValidationExpressions",stage="BETA"} 1 +kubernetes_feature_enabled{name="CustomResourceValidationExpressions",stage=""} 1 kubernetes_feature_enabled{name="DefaultHostNetworkHostPortsInPodTemplates",stage="DEPRECATED"} 0 -kubernetes_feature_enabled{name="DevicePluginCDIDevices",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="DisableCloudProviders",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="DisableKubeletCloudCredentialProviders",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="DownwardAPIHugePages",stage=""} 1 +kubernetes_feature_enabled{name="DevicePluginCDIDevices",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableCloudProviders",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableKubeletCloudCredentialProviders",stage="BETA"} 1 +kubernetes_feature_enabled{name="DisableNodeKubeProxyVersion",stage="ALPHA"} 0 kubernetes_feature_enabled{name="DynamicResourceAllocation",stage="ALPHA"} 0 kubernetes_feature_enabled{name="EfficientWatchResumption",stage=""} 1 kubernetes_feature_enabled{name="ElasticIndexedJob",stage="BETA"} 1 @@ -492,13 +570,13 @@ kubernetes_feature_enabled{name="EventedPLEG",stage="BETA"} 0 kubernetes_feature_enabled{name="ExecProbeTimeout",stage=""} 1 
kubernetes_feature_enabled{name="ExpandedDNSConfig",stage=""} 1 kubernetes_feature_enabled{name="ExperimentalHostUserNamespaceDefaulting",stage="DEPRECATED"} 0 -kubernetes_feature_enabled{name="GRPCContainerProbe",stage=""} 1 kubernetes_feature_enabled{name="GracefulNodeShutdown",stage="BETA"} 1 kubernetes_feature_enabled{name="GracefulNodeShutdownBasedOnPodPriority",stage="BETA"} 1 kubernetes_feature_enabled{name="HPAContainerMetrics",stage="BETA"} 1 kubernetes_feature_enabled{name="HPAScaleToZero",stage="ALPHA"} 0 kubernetes_feature_enabled{name="HonorPVReclaimPolicy",stage="ALPHA"} 0 kubernetes_feature_enabled{name="IPTablesOwnershipCleanup",stage=""} 1 +kubernetes_feature_enabled{name="ImageMaximumGCAge",stage="ALPHA"} 0 kubernetes_feature_enabled{name="InPlacePodVerticalScaling",stage="ALPHA"} 0 kubernetes_feature_enabled{name="InTreePluginAWSUnregister",stage="ALPHA"} 0 kubernetes_feature_enabled{name="InTreePluginAzureDiskUnregister",stage="ALPHA"} 0 @@ -508,15 +586,13 @@ kubernetes_feature_enabled{name="InTreePluginOpenStackUnregister",stage="ALPHA"} kubernetes_feature_enabled{name="InTreePluginPortworxUnregister",stage="ALPHA"} 0 kubernetes_feature_enabled{name="InTreePluginRBDUnregister",stage="DEPRECATED"} 0 kubernetes_feature_enabled{name="InTreePluginvSphereUnregister",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="JobBackoffLimitPerIndex",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="JobMutableNodeSchedulingDirectives",stage=""} 1 +kubernetes_feature_enabled{name="JobBackoffLimitPerIndex",stage="BETA"} 1 kubernetes_feature_enabled{name="JobPodFailurePolicy",stage="BETA"} 1 -kubernetes_feature_enabled{name="JobPodReplacementPolicy",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="JobReadyPods",stage="BETA"} 1 -kubernetes_feature_enabled{name="JobTrackingWithFinalizers",stage=""} 1 -kubernetes_feature_enabled{name="KMSv1",stage="DEPRECATED"} 1 -kubernetes_feature_enabled{name="KMSv2",stage="BETA"} 1 
-kubernetes_feature_enabled{name="KMSv2KDF",stage="BETA"} 0 +kubernetes_feature_enabled{name="JobPodReplacementPolicy",stage="BETA"} 1 +kubernetes_feature_enabled{name="JobReadyPods",stage=""} 1 +kubernetes_feature_enabled{name="KMSv1",stage="DEPRECATED"} 0 +kubernetes_feature_enabled{name="KMSv2",stage=""} 1 +kubernetes_feature_enabled{name="KMSv2KDF",stage=""} 1 kubernetes_feature_enabled{name="KubeProxyDrainingTerminatingNodes",stage="ALPHA"} 0 kubernetes_feature_enabled{name="KubeletCgroupDriverFromCRI",stage="ALPHA"} 0 kubernetes_feature_enabled{name="KubeletInUserNamespace",stage="ALPHA"} 0 @@ -524,83 +600,94 @@ kubernetes_feature_enabled{name="KubeletPodResources",stage=""} 1 kubernetes_feature_enabled{name="KubeletPodResourcesDynamicResources",stage="ALPHA"} 0 kubernetes_feature_enabled{name="KubeletPodResourcesGet",stage="ALPHA"} 0 kubernetes_feature_enabled{name="KubeletPodResourcesGetAllocatable",stage=""} 1 +kubernetes_feature_enabled{name="KubeletSeparateDiskGC",stage="ALPHA"} 0 kubernetes_feature_enabled{name="KubeletTracing",stage="BETA"} 1 -kubernetes_feature_enabled{name="LegacyServiceAccountTokenCleanUp",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="LegacyServiceAccountTokenNoAutoGeneration",stage=""} 1 +kubernetes_feature_enabled{name="LegacyServiceAccountTokenCleanUp",stage="BETA"} 1 kubernetes_feature_enabled{name="LegacyServiceAccountTokenTracking",stage=""} 1 +kubernetes_feature_enabled{name="LoadBalancerIPMode",stage="ALPHA"} 0 kubernetes_feature_enabled{name="LocalStorageCapacityIsolationFSQuotaMonitoring",stage="ALPHA"} 0 kubernetes_feature_enabled{name="LogarithmicScaleDown",stage="BETA"} 1 kubernetes_feature_enabled{name="LoggingAlphaOptions",stage="ALPHA"} 0 kubernetes_feature_enabled{name="LoggingBetaOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="MatchLabelKeysInPodAffinity",stage="ALPHA"} 0 kubernetes_feature_enabled{name="MatchLabelKeysInPodTopologySpread",stage="BETA"} 1 
kubernetes_feature_enabled{name="MaxUnavailableStatefulSet",stage="ALPHA"} 0 kubernetes_feature_enabled{name="MemoryManager",stage="BETA"} 1 kubernetes_feature_enabled{name="MemoryQoS",stage="ALPHA"} 0 kubernetes_feature_enabled{name="MinDomainsInPodTopologySpread",stage="BETA"} 1 kubernetes_feature_enabled{name="MinimizeIPTablesRestore",stage=""} 1 -kubernetes_feature_enabled{name="MultiCIDRRangeAllocator",stage="ALPHA"} 0 kubernetes_feature_enabled{name="MultiCIDRServiceAllocator",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="NFTablesProxyMode",stage="ALPHA"} 0 kubernetes_feature_enabled{name="NewVolumeManagerReconstruction",stage="BETA"} 1 kubernetes_feature_enabled{name="NodeInclusionPolicyInPodTopologySpread",stage="BETA"} 1 kubernetes_feature_enabled{name="NodeLogQuery",stage="ALPHA"} 0 kubernetes_feature_enabled{name="NodeOutOfServiceVolumeDetach",stage=""} 1 kubernetes_feature_enabled{name="NodeSwap",stage="BETA"} 0 kubernetes_feature_enabled{name="OpenAPIEnums",stage="BETA"} 1 -kubernetes_feature_enabled{name="OpenAPIV3",stage=""} 1 kubernetes_feature_enabled{name="PDBUnhealthyPodEvictionPolicy",stage="BETA"} 1 -kubernetes_feature_enabled{name="PersistentVolumeLastPhaseTransitionTime",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PersistentVolumeLastPhaseTransitionTime",stage="BETA"} 1 kubernetes_feature_enabled{name="PodAndContainerStatsFromCRI",stage="ALPHA"} 0 kubernetes_feature_enabled{name="PodDeletionCost",stage="BETA"} 1 kubernetes_feature_enabled{name="PodDisruptionConditions",stage="BETA"} 1 -kubernetes_feature_enabled{name="PodHostIPs",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PodHostIPs",stage="BETA"} 1 kubernetes_feature_enabled{name="PodIndexLabel",stage="BETA"} 1 -kubernetes_feature_enabled{name="PodReadyToStartContainersCondition",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PodLifecycleSleepAction",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="PodReadyToStartContainersCondition",stage="BETA"} 1 
kubernetes_feature_enabled{name="PodSchedulingReadiness",stage="BETA"} 1 -kubernetes_feature_enabled{name="ProbeTerminationGracePeriod",stage=""} 1 kubernetes_feature_enabled{name="ProcMountType",stage="ALPHA"} 0 kubernetes_feature_enabled{name="ProxyTerminatingEndpoints",stage=""} 1 kubernetes_feature_enabled{name="QOSReserved",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="ReadWriteOncePod",stage="BETA"} 1 +kubernetes_feature_enabled{name="ReadWriteOncePod",stage=""} 1 kubernetes_feature_enabled{name="RecoverVolumeExpansionFailure",stage="ALPHA"} 0 -kubernetes_feature_enabled{name="RemainingItemCount",stage="BETA"} 1 +kubernetes_feature_enabled{name="RemainingItemCount",stage=""} 1 kubernetes_feature_enabled{name="RemoveSelfLink",stage=""} 1 -kubernetes_feature_enabled{name="RetroactiveDefaultStorageClass",stage=""} 1 kubernetes_feature_enabled{name="RotateKubeletServerCertificate",stage="BETA"} 1 +kubernetes_feature_enabled{name="RuntimeClassInImageCriApi",stage="ALPHA"} 0 kubernetes_feature_enabled{name="SELinuxMountReadWriteOncePod",stage="BETA"} 1 -kubernetes_feature_enabled{name="SchedulerQueueingHints",stage="BETA"} 1 -kubernetes_feature_enabled{name="SeccompDefault",stage=""} 1 +kubernetes_feature_enabled{name="SchedulerQueueingHints",stage="BETA"} 0 kubernetes_feature_enabled{name="SecurityContextDeny",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="SeparateTaintEvictionController",stage="BETA"} 1 kubernetes_feature_enabled{name="ServerSideApply",stage=""} 1 kubernetes_feature_enabled{name="ServerSideFieldValidation",stage=""} 1 -kubernetes_feature_enabled{name="ServiceNodePortStaticSubrange",stage="BETA"} 1 -kubernetes_feature_enabled{name="SidecarContainers",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenJTI",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenNodeBinding",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceAccountTokenNodeBindingValidation",stage="ALPHA"} 0 
+kubernetes_feature_enabled{name="ServiceAccountTokenPodNodeInfo",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="ServiceNodePortStaticSubrange",stage=""} 1 +kubernetes_feature_enabled{name="SidecarContainers",stage="BETA"} 1 kubernetes_feature_enabled{name="SizeMemoryBackedVolumes",stage="BETA"} 1 -kubernetes_feature_enabled{name="SkipReadOnlyValidationGCE",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="SkipReadOnlyValidationGCE",stage="DEPRECATED"} 1 kubernetes_feature_enabled{name="StableLoadBalancerNodeSet",stage="BETA"} 1 kubernetes_feature_enabled{name="StatefulSetAutoDeletePVC",stage="BETA"} 1 kubernetes_feature_enabled{name="StatefulSetStartOrdinal",stage="BETA"} 1 kubernetes_feature_enabled{name="StorageVersionAPI",stage="ALPHA"} 0 kubernetes_feature_enabled{name="StorageVersionHash",stage="BETA"} 1 +kubernetes_feature_enabled{name="StructuredAuthenticationConfiguration",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="StructuredAuthorizationConfiguration",stage="ALPHA"} 0 kubernetes_feature_enabled{name="TopologyAwareHints",stage="BETA"} 1 -kubernetes_feature_enabled{name="TopologyManager",stage=""} 1 kubernetes_feature_enabled{name="TopologyManagerPolicyAlphaOptions",stage="ALPHA"} 0 kubernetes_feature_enabled{name="TopologyManagerPolicyBetaOptions",stage="BETA"} 1 kubernetes_feature_enabled{name="TopologyManagerPolicyOptions",stage="BETA"} 1 +kubernetes_feature_enabled{name="TranslateStreamCloseWebsocketRequests",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="UnauthenticatedHTTP2DOSMitigation",stage="BETA"} 1 kubernetes_feature_enabled{name="UnknownVersionInteroperabilityProxy",stage="ALPHA"} 0 +kubernetes_feature_enabled{name="UserNamespacesPodSecurityStandards",stage="ALPHA"} 0 kubernetes_feature_enabled{name="UserNamespacesSupport",stage="ALPHA"} 0 kubernetes_feature_enabled{name="ValidatingAdmissionPolicy",stage="BETA"} 0 +kubernetes_feature_enabled{name="VolumeAttributesClass",stage="ALPHA"} 0 
kubernetes_feature_enabled{name="VolumeCapacityPriority",stage="ALPHA"} 0 kubernetes_feature_enabled{name="WatchBookmark",stage=""} 1 kubernetes_feature_enabled{name="WatchList",stage="ALPHA"} 0 kubernetes_feature_enabled{name="WinDSR",stage="ALPHA"} 0 kubernetes_feature_enabled{name="WinOverlay",stage="BETA"} 1 kubernetes_feature_enabled{name="WindowsHostNetwork",stage="ALPHA"} 1 +kubernetes_feature_enabled{name="ZeroLimitedNominalConcurrencyShares",stage="BETA"} 0 # HELP leader_election_master_status [ALPHA] Gauge of if the reporting system is master of the relevant lease, 0 indicates backup, 1 indicates master. 'name' is the string used to identify the lease. Please make sure to group by name. # TYPE leader_election_master_status gauge leader_election_master_status{name="kube-scheduler"} 1 # HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. # TYPE process_cpu_seconds_total counter -process_cpu_seconds_total 3.01 +process_cpu_seconds_total 14.27 # HELP process_max_fds Maximum number of open file descriptors. # TYPE process_max_fds gauge process_max_fds 1.048576e+06 @@ -609,21 +696,21 @@ process_max_fds 1.048576e+06 process_open_fds 10 # HELP process_resident_memory_bytes Resident memory size in bytes. # TYPE process_resident_memory_bytes gauge -process_resident_memory_bytes 4.54656e+07 +process_resident_memory_bytes 6.3909888e+07 # HELP process_start_time_seconds Start time of the process since unix epoch in seconds. # TYPE process_start_time_seconds gauge -process_start_time_seconds 1.69875238442e+09 +process_start_time_seconds 1.70489476707e+09 # HELP process_virtual_memory_bytes Virtual memory size in bytes. # TYPE process_virtual_memory_bytes gauge -process_virtual_memory_bytes 7.8721024e+08 +process_virtual_memory_bytes 1.316384768e+09 # HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. 
# TYPE process_virtual_memory_max_bytes gauge process_virtual_memory_max_bytes 1.8446744073709552e+19 # HELP registered_metrics_total [BETA] The count of registered metrics broken by stability level and deprecation version. # TYPE registered_metrics_total counter -registered_metrics_total{deprecated_version="",stability_level="ALPHA"} 78 -registered_metrics_total{deprecated_version="",stability_level="BETA"} 6 -registered_metrics_total{deprecated_version="",stability_level="STABLE"} 12 +registered_metrics_total{deprecated_version="",stability_level="ALPHA"} 83 +registered_metrics_total{deprecated_version="",stability_level="BETA"} 5 +registered_metrics_total{deprecated_version="",stability_level="STABLE"} 13 # HELP rest_client_exec_plugin_certificate_rotation_age [ALPHA] Histogram of the number of seconds the last auth exec plugin client certificate lived before being rotated. If auth exec plugin client certificates are unused, histogram will contain no data. # TYPE rest_client_exec_plugin_certificate_rotation_age histogram rest_client_exec_plugin_certificate_rotation_age_bucket{le="600"} 0 @@ -645,21 +732,21 @@ rest_client_exec_plugin_certificate_rotation_age_count 0 rest_client_exec_plugin_ttl_seconds +Inf # HELP rest_client_rate_limiter_duration_seconds [ALPHA] Client side rate limiter latency in seconds. Broken down by verb, and host. 
# TYPE rest_client_rate_limiter_duration_seconds histogram -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.005"} 237 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.025"} 237 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.1"} 237 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.25"} 237 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.5"} 237 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="1"} 237 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="2"} 237 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="4"} 237 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="8"} 237 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="15"} 237 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="30"} 237 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="60"} 237 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 237 -rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="GET"} 0.0009123660000000004 -rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="GET"} 237 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.005"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.025"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.1"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.25"} 1250 
+rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.5"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="1"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="2"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="4"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="8"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="15"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="30"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="60"} 1250 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1250 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="GET"} 0.0044709540000000035 +rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="GET"} 1250 rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.005"} 3 rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.025"} 3 rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.1"} 3 @@ -673,57 +760,57 @@ rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PA rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="30"} 3 rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="60"} 3 rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 3 -rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="PATCH"} 3.5005e-05 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="PATCH"} 2.04e-05 
rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="PATCH"} 3 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.005"} 21 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.025"} 21 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.1"} 21 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.25"} 21 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.5"} 21 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="1"} 21 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="2"} 21 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="4"} 21 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="8"} 21 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="15"} 21 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="30"} 21 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="60"} 21 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 21 -rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="POST"} 9.512799999999997e-05 -rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="POST"} 21 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.005"} 189 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.025"} 189 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.1"} 189 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.25"} 189 
-rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.5"} 189 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="1"} 189 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="2"} 189 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="4"} 189 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="8"} 189 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="15"} 189 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="30"} 189 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="60"} 189 -rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 189 -rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="PUT"} 0.000467786 -rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="PUT"} 189 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.005"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.025"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.1"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.25"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.5"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="1"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="2"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="4"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="8"} 19 
+rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="15"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="30"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="60"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 19 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="POST"} 7.005999999999999e-05 +rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="POST"} 19 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.005"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.025"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.1"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.25"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.5"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="1"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="2"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="4"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="8"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="15"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="30"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="60"} 1202 +rest_client_rate_limiter_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1202 +rest_client_rate_limiter_duration_seconds_sum{host="172.18.0.2:6443",verb="PUT"} 0.0031652790000000004 
+rest_client_rate_limiter_duration_seconds_count{host="172.18.0.2:6443",verb="PUT"} 1202 # HELP rest_client_request_duration_seconds [ALPHA] Request latency in seconds. Broken down by verb, and host. # TYPE rest_client_request_duration_seconds histogram -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.005"} 226 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.025"} 236 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.1"} 236 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.25"} 236 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.5"} 236 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="1"} 236 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="2"} 236 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="4"} 237 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="8"} 237 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="15"} 237 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="30"} 237 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="60"} 237 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 237 -rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="GET"} 4.451666564999998 -rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="GET"} 237 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.005"} 1036 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.025"} 1247 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.1"} 1248 
+rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.25"} 1249 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="0.5"} 1249 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="1"} 1249 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="2"} 1249 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="4"} 1250 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="8"} 1250 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="15"} 1250 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="30"} 1250 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="60"} 1250 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1250 +rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="GET"} 8.867916397999984 +rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="GET"} 1250 rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.005"} 0 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.025"} 3 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.025"} 0 rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.1"} 3 rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.25"} 3 rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="0.5"} 3 @@ -735,54 +822,54 @@ rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH", rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="30"} 3 rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="60"} 3 
rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 3 -rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="PATCH"} 0.04556753399999999 +rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="PATCH"} 0.087315227 rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="PATCH"} 3 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.005"} 5 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.025"} 21 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.1"} 21 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.25"} 21 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.5"} 21 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="1"} 21 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="2"} 21 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="4"} 21 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="8"} 21 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="15"} 21 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="30"} 21 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="60"} 21 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 21 -rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="POST"} 0.14683643499999996 -rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="POST"} 21 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.005"} 27 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.025"} 182 
-rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.1"} 186 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.25"} 189 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.5"} 189 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="1"} 189 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="2"} 189 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="4"} 189 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="8"} 189 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="15"} 189 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="30"} 189 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="60"} 189 -rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 189 -rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="PUT"} 2.021037979000001 -rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="PUT"} 189 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.005"} 2 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.025"} 17 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.1"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.25"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="0.5"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="1"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="2"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="4"} 19 
+rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="8"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="15"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="30"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="60"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 19 +rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="POST"} 0.24962320100000002 +rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="POST"} 19 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.005"} 6 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.025"} 1201 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.1"} 1201 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.25"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="0.5"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="1"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="2"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="4"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="8"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="15"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="30"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="60"} 1202 +rest_client_request_duration_seconds_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1202 +rest_client_request_duration_seconds_sum{host="172.18.0.2:6443",verb="PUT"} 14.273085986000012 
+rest_client_request_duration_seconds_count{host="172.18.0.2:6443",verb="PUT"} 1202 # HELP rest_client_request_size_bytes [ALPHA] Request size in bytes. Broken down by verb and host. # TYPE rest_client_request_size_bytes histogram -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="64"} 237 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="256"} 237 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="512"} 237 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1024"} 237 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4096"} 237 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="16384"} 237 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="65536"} 237 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="262144"} 237 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.048576e+06"} 237 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4.194304e+06"} 237 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.6777216e+07"} 237 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 237 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="64"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="256"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="512"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1024"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4096"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="16384"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="65536"} 1250 
+rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="262144"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.048576e+06"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4.194304e+06"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.6777216e+07"} 1250 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1250 rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="GET"} 0 -rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="GET"} 237 +rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="GET"} 1250 rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="64"} 0 rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="256"} 0 rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="512"} 3 @@ -795,60 +882,60 @@ rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1. 
rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4.194304e+06"} 3 rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.6777216e+07"} 3 rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 3 -rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="PATCH"} 1029 +rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="PATCH"} 1026 rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="PATCH"} 3 rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="64"} 0 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="256"} 8 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="512"} 19 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1024"} 20 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4096"} 21 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="16384"} 21 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="65536"} 21 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="262144"} 21 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.048576e+06"} 21 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4.194304e+06"} 21 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.6777216e+07"} 21 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 21 -rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="POST"} 7220 -rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="POST"} 21 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="256"} 7 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="512"} 17 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1024"} 
18 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4096"} 19 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="16384"} 19 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="65536"} 19 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="262144"} 19 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.048576e+06"} 19 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4.194304e+06"} 19 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.6777216e+07"} 19 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 19 +rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="POST"} 6726 +rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="POST"} 19 rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="64"} 0 rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="256"} 0 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="512"} 189 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1024"} 189 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4096"} 189 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="16384"} 189 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="65536"} 189 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="262144"} 189 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.048576e+06"} 189 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4.194304e+06"} 189 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.6777216e+07"} 189 -rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 189 
-rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="PUT"} 80519 -rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="PUT"} 189 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="512"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1024"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4096"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="16384"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="65536"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="262144"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.048576e+06"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4.194304e+06"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.6777216e+07"} 1202 +rest_client_request_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1202 +rest_client_request_size_bytes_sum{host="172.18.0.2:6443",verb="PUT"} 512715 +rest_client_request_size_bytes_count{host="172.18.0.2:6443",verb="PUT"} 1202 # HELP rest_client_requests_total [ALPHA] Number of HTTP requests, partitioned by status code, method, and host. 
# TYPE rest_client_requests_total counter -rest_client_requests_total{code="200",host="172.18.0.2:6443",method="GET"} 224 +rest_client_requests_total{code="200",host="172.18.0.2:6443",method="GET"} 1305 rest_client_requests_total{code="200",host="172.18.0.2:6443",method="PATCH"} 3 -rest_client_requests_total{code="200",host="172.18.0.2:6443",method="PUT"} 189 -rest_client_requests_total{code="201",host="172.18.0.2:6443",method="POST"} 21 +rest_client_requests_total{code="200",host="172.18.0.2:6443",method="PUT"} 1202 +rest_client_requests_total{code="201",host="172.18.0.2:6443",method="POST"} 19 rest_client_requests_total{code="403",host="172.18.0.2:6443",method="GET"} 32 rest_client_requests_total{code="404",host="172.18.0.2:6443",method="GET"} 1 # HELP rest_client_response_size_bytes [ALPHA] Response size in bytes. Broken down by verb and host. # TYPE rest_client_response_size_bytes histogram rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="64"} 9 rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="256"} 35 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="512"} 231 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1024"} 233 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4096"} 236 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="16384"} 237 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="65536"} 237 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="262144"} 237 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.048576e+06"} 237 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4.194304e+06"} 237 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.6777216e+07"} 237 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 237 
-rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="GET"} 103711 -rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="GET"} 237 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="512"} 1244 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1024"} 1246 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4096"} 1249 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="16384"} 1250 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="65536"} 1250 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="262144"} 1250 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.048576e+06"} 1250 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="4.194304e+06"} 1250 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="1.6777216e+07"} 1250 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="GET",le="+Inf"} 1250 +rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="GET"} 537154 +rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="GET"} 1250 rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="64"} 0 rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="256"} 0 rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="512"} 0 @@ -861,36 +948,36 @@ rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1 rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="4.194304e+06"} 3 rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="1.6777216e+07"} 3 rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PATCH",le="+Inf"} 3 -rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="PATCH"} 10810 
+rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="PATCH"} 10807 rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="PATCH"} 3 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="64"} 7 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="256"} 7 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="512"} 8 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1024"} 19 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4096"} 21 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="16384"} 21 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="65536"} 21 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="262144"} 21 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.048576e+06"} 21 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4.194304e+06"} 21 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.6777216e+07"} 21 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 21 -rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="POST"} 11028 -rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="POST"} 21 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="64"} 6 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="256"} 6 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="512"} 7 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1024"} 17 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4096"} 19 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="16384"} 19 
+rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="65536"} 19 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="262144"} 19 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.048576e+06"} 19 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="4.194304e+06"} 19 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="1.6777216e+07"} 19 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="POST",le="+Inf"} 19 +rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="POST"} 10350 +rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="POST"} 19 rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="64"} 0 rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="256"} 0 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="512"} 189 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1024"} 189 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4096"} 189 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="16384"} 189 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="65536"} 189 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="262144"} 189 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.048576e+06"} 189 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4.194304e+06"} 189 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.6777216e+07"} 189 -rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 189 -rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="PUT"} 80520 -rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="PUT"} 189 
+rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="512"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1024"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4096"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="16384"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="65536"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="262144"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.048576e+06"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="4.194304e+06"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="1.6777216e+07"} 1202 +rest_client_response_size_bytes_bucket{host="172.18.0.2:6443",verb="PUT",le="+Inf"} 1202 +rest_client_response_size_bytes_sum{host="172.18.0.2:6443",verb="PUT"} 512716 +rest_client_response_size_bytes_count{host="172.18.0.2:6443",verb="PUT"} 1202 # HELP rest_client_transport_cache_entries [ALPHA] Number of transport entries in the internal cache. 
# TYPE rest_client_transport_cache_entries gauge rest_client_transport_cache_entries 2 @@ -906,60 +993,60 @@ scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bin scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0008"} 0 scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0016"} 0 scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0032"} 0 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0064"} 1 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0128"} 5 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0256"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0512"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.1024"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.2048"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="+Inf"} 7 -scheduler_framework_extension_point_duration_seconds_sum{extension_point="Bind",profile="default-scheduler",status="Success"} 0.06508699500000001 -scheduler_framework_extension_point_duration_seconds_count{extension_point="Bind",profile="default-scheduler",status="Success"} 7 
-scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0001"} 8 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0002"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0004"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0008"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0016"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0032"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0064"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0128"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0256"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0512"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.1024"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.2048"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="+Inf"} 10 
-scheduler_framework_extension_point_duration_seconds_sum{extension_point="Filter",profile="default-scheduler",status="Success"} 0.000804865 -scheduler_framework_extension_point_duration_seconds_count{extension_point="Filter",profile="default-scheduler",status="Success"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0001"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0002"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0004"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0008"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0016"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0032"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0064"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0128"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0256"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0512"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.1024"} 7 
-scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.2048"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="+Inf"} 7 -scheduler_framework_extension_point_duration_seconds_sum{extension_point="Permit",profile="default-scheduler",status="Success"} 2.0112e-05 -scheduler_framework_extension_point_duration_seconds_count{extension_point="Permit",profile="default-scheduler",status="Success"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0001"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0002"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0004"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0008"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0016"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0032"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0064"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0128"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0256"} 7 
-scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0512"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.1024"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.2048"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="+Inf"} 7 -scheduler_framework_extension_point_duration_seconds_sum{extension_point="PostBind",profile="default-scheduler",status="Success"} 6.3082e-05 -scheduler_framework_extension_point_duration_seconds_count{extension_point="PostBind",profile="default-scheduler",status="Success"} 7 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0064"} 0 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0128"} 4 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0256"} 4 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.0512"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.1024"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="0.2048"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Bind",profile="default-scheduler",status="Success",le="+Inf"} 6 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="Bind",profile="default-scheduler",status="Success"} 
0.113648547 +scheduler_framework_extension_point_duration_seconds_count{extension_point="Bind",profile="default-scheduler",status="Success"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0001"} 5 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0002"} 8 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0004"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0008"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0016"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0032"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0064"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0128"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0256"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.0512"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.1024"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="0.2048"} 9 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Filter",profile="default-scheduler",status="Success",le="+Inf"} 9 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="Filter",profile="default-scheduler",status="Success"} 0.00099381 +scheduler_framework_extension_point_duration_seconds_count{extension_point="Filter",profile="default-scheduler",status="Success"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0001"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0002"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0004"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0008"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0016"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0032"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0064"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0128"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0256"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.0512"} 6 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.1024"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="0.2048"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Permit",profile="default-scheduler",status="Success",le="+Inf"} 6 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="Permit",profile="default-scheduler",status="Success"} 1.4594999999999998e-05 +scheduler_framework_extension_point_duration_seconds_count{extension_point="Permit",profile="default-scheduler",status="Success"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0001"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0002"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0004"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0008"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0016"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0032"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0064"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0128"} 6 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0256"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.0512"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.1024"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="0.2048"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostBind",profile="default-scheduler",status="Success",le="+Inf"} 6 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="PostBind",profile="default-scheduler",status="Success"} 2.1460999999999997e-05 +scheduler_framework_extension_point_duration_seconds_count{extension_point="PostBind",profile="default-scheduler",status="Success"} 6 scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.0001"} 3 scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.0002"} 3 scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.0004"} 3 @@ -973,68 +1060,68 @@ scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Pos scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.1024"} 3 scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="0.2048"} 3 
scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable",le="+Inf"} 3 -scheduler_framework_extension_point_duration_seconds_sum{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable"} 5.443e-05 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable"} 8.5664e-05 scheduler_framework_extension_point_duration_seconds_count{extension_point="PostFilter",profile="default-scheduler",status="Unschedulable"} 3 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0001"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0002"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0004"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0008"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0016"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0032"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0064"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0128"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0256"} 7 
-scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0512"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.1024"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.2048"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="+Inf"} 7 -scheduler_framework_extension_point_duration_seconds_sum{extension_point="PreBind",profile="default-scheduler",status="Success"} 0.000137523 -scheduler_framework_extension_point_duration_seconds_count{extension_point="PreBind",profile="default-scheduler",status="Success"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0001"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0002"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0004"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0008"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0016"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0032"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0064"} 10 
-scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0128"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0256"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0512"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.1024"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.2048"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="+Inf"} 10 -scheduler_framework_extension_point_duration_seconds_sum{extension_point="PreEnqueue",profile="default-scheduler",status="Success"} 0.00012312899999999998 -scheduler_framework_extension_point_duration_seconds_count{extension_point="PreEnqueue",profile="default-scheduler",status="Success"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0001"} 9 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0002"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0004"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0008"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0016"} 10 
-scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0032"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0064"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0128"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0256"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0512"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.1024"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.2048"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="+Inf"} 10 -scheduler_framework_extension_point_duration_seconds_sum{extension_point="PreFilter",profile="default-scheduler",status="Success"} 0.000671459 -scheduler_framework_extension_point_duration_seconds_count{extension_point="PreFilter",profile="default-scheduler",status="Success"} 10 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0001"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0002"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0004"} 7 
-scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0008"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0016"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0032"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0064"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0128"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0256"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0512"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.1024"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.2048"} 7 -scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="+Inf"} 7 -scheduler_framework_extension_point_duration_seconds_sum{extension_point="Reserve",profile="default-scheduler",status="Success"} 4.0518999999999996e-05 -scheduler_framework_extension_point_duration_seconds_count{extension_point="Reserve",profile="default-scheduler",status="Success"} 7 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0001"} 6 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0002"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0004"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0008"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0016"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0032"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0064"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0128"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0256"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.0512"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.1024"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="0.2048"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreBind",profile="default-scheduler",status="Success",le="+Inf"} 6 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="PreBind",profile="default-scheduler",status="Success"} 6.4472e-05 
+scheduler_framework_extension_point_duration_seconds_count{extension_point="PreBind",profile="default-scheduler",status="Success"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0001"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0002"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0004"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0008"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0016"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0032"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0064"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0128"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0256"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.0512"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.1024"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="0.2048"} 9 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreEnqueue",profile="default-scheduler",status="Success",le="+Inf"} 9 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="PreEnqueue",profile="default-scheduler",status="Success"} 0.00014482 +scheduler_framework_extension_point_duration_seconds_count{extension_point="PreEnqueue",profile="default-scheduler",status="Success"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0001"} 8 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0002"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0004"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0008"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0016"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0032"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0064"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0128"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0256"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.0512"} 9 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.1024"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="0.2048"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="PreFilter",profile="default-scheduler",status="Success",le="+Inf"} 9 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="PreFilter",profile="default-scheduler",status="Success"} 0.0006032540000000001 +scheduler_framework_extension_point_duration_seconds_count{extension_point="PreFilter",profile="default-scheduler",status="Success"} 9 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0001"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0002"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0004"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0008"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0016"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0032"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0064"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0128"} 6 
+scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0256"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.0512"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.1024"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="0.2048"} 6 +scheduler_framework_extension_point_duration_seconds_bucket{extension_point="Reserve",profile="default-scheduler",status="Success",le="+Inf"} 6 +scheduler_framework_extension_point_duration_seconds_sum{extension_point="Reserve",profile="default-scheduler",status="Success"} 2.3699e-05 +scheduler_framework_extension_point_duration_seconds_count{extension_point="Reserve",profile="default-scheduler",status="Success"} 6 # HELP scheduler_goroutines [ALPHA] Number of running goroutines split by the work they do such as binding. 
# TYPE scheduler_goroutines gauge scheduler_goroutines{operation="Filter"} 0 @@ -1052,14 +1139,14 @@ scheduler_plugin_evaluation_total{extension_point="Filter",plugin="AzureDiskLimi scheduler_plugin_evaluation_total{extension_point="Filter",plugin="EBSLimits",profile="default-scheduler"} 0 scheduler_plugin_evaluation_total{extension_point="Filter",plugin="GCEPDLimits",profile="default-scheduler"} 0 scheduler_plugin_evaluation_total{extension_point="Filter",plugin="InterPodAffinity",profile="default-scheduler"} 0 -scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodeAffinity",profile="default-scheduler"} 7 -scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodeName",profile="default-scheduler"} 10 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodeAffinity",profile="default-scheduler"} 6 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodeName",profile="default-scheduler"} 9 scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodePorts",profile="default-scheduler"} 0 -scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodeResourcesFit",profile="default-scheduler"} 7 -scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodeUnschedulable",profile="default-scheduler"} 10 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodeResourcesFit",profile="default-scheduler"} 6 +scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodeUnschedulable",profile="default-scheduler"} 9 scheduler_plugin_evaluation_total{extension_point="Filter",plugin="NodeVolumeLimits",profile="default-scheduler"} 0 scheduler_plugin_evaluation_total{extension_point="Filter",plugin="PodTopologySpread",profile="default-scheduler"} 0 -scheduler_plugin_evaluation_total{extension_point="Filter",plugin="TaintToleration",profile="default-scheduler"} 10 
+scheduler_plugin_evaluation_total{extension_point="Filter",plugin="TaintToleration",profile="default-scheduler"} 9 scheduler_plugin_evaluation_total{extension_point="Filter",plugin="VolumeBinding",profile="default-scheduler"} 0 scheduler_plugin_evaluation_total{extension_point="Filter",plugin="VolumeRestrictions",profile="default-scheduler"} 0 scheduler_plugin_evaluation_total{extension_point="Filter",plugin="VolumeZone",profile="default-scheduler"} 0 @@ -1067,9 +1154,9 @@ scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="AzureDiskL scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="EBSLimits",profile="default-scheduler"} 0 scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="GCEPDLimits",profile="default-scheduler"} 0 scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="InterPodAffinity",profile="default-scheduler"} 0 -scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="NodeAffinity",profile="default-scheduler"} 10 +scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="NodeAffinity",profile="default-scheduler"} 9 scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="NodePorts",profile="default-scheduler"} 0 -scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="NodeResourcesFit",profile="default-scheduler"} 10 +scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="NodeResourcesFit",profile="default-scheduler"} 9 scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="NodeVolumeLimits",profile="default-scheduler"} 0 scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="PodTopologySpread",profile="default-scheduler"} 0 scheduler_plugin_evaluation_total{extension_point="PreFilter",plugin="VolumeBinding",profile="default-scheduler"} 0 @@ -1091,52 +1178,6 @@ scheduler_plugin_evaluation_total{extension_point="Score",plugin="TaintToleratio 
scheduler_plugin_evaluation_total{extension_point="Score",plugin="VolumeBinding",profile="default-scheduler"} 0 # HELP scheduler_plugin_execution_duration_seconds [ALPHA] Duration for running a plugin at a specific extension point. # TYPE scheduler_plugin_execution_duration_seconds histogram -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="1e-05"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="1.5000000000000002e-05"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="2.2500000000000005e-05"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="3.375000000000001e-05"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="5.062500000000001e-05"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="7.593750000000002e-05"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="0.00011390625000000003"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="0.00017085937500000006"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="0.0002562890625000001"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="0.00038443359375000017"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="0.0005766503906250003"} 0 
-scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="0.0008649755859375004"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="0.0012974633789062506"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="0.0019461950683593758"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="0.0029192926025390638"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="0.004378938903808595"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="0.006568408355712893"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="0.009852612533569338"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="0.014778918800354007"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="0.02216837820053101"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Bind",plugin="DefaultBinder",status="Success",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="Bind",plugin="DefaultBinder",status="Success"} 0.008267277 -scheduler_plugin_execution_duration_seconds_count{extension_point="Bind",plugin="DefaultBinder",status="Success"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="1e-05"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="1.5000000000000002e-05"} 0 
-scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="2.2500000000000005e-05"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="3.375000000000001e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="5.062500000000001e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="7.593750000000002e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="0.00011390625000000003"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="0.00017085937500000006"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="0.0002562890625000001"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="0.00038443359375000017"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="0.0005766503906250003"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="0.0008649755859375004"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="0.0012974633789062506"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="0.0019461950683593758"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="0.0029192926025390638"} 1 
-scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="0.004378938903808595"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="0.006568408355712893"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="0.009852612533569338"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="0.014778918800354007"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="0.02216837820053101"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeAffinity",status="Success",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="Filter",plugin="NodeAffinity",status="Success"} 2.3894e-05 -scheduler_plugin_execution_duration_seconds_count{extension_point="Filter",plugin="NodeAffinity",status="Success"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="1e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="1.5000000000000002e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="2.2500000000000005e-05"} 1 @@ -1158,31 +1199,8 @@ scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plug scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.014778918800354007"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="0.02216837820053101"} 1 
scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeName",status="Success",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="Filter",plugin="NodeName",status="Success"} 3.94e-07 +scheduler_plugin_execution_duration_seconds_sum{extension_point="Filter",plugin="NodeName",status="Success"} 2.56e-07 scheduler_plugin_execution_duration_seconds_count{extension_point="Filter",plugin="NodeName",status="Success"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="1e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="1.5000000000000002e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="2.2500000000000005e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="3.375000000000001e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="5.062500000000001e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="7.593750000000002e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="0.00011390625000000003"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="0.00017085937500000006"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="0.0002562890625000001"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="0.00038443359375000017"} 1 
-scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="0.0005766503906250003"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="0.0008649755859375004"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="0.0012974633789062506"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="0.0019461950683593758"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="0.0029192926025390638"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="0.004378938903808595"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="0.006568408355712893"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="0.009852612533569338"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="0.014778918800354007"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="0.02216837820053101"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeResourcesFit",status="Success",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="Filter",plugin="NodeResourcesFit",status="Success"} 5.703e-06 -scheduler_plugin_execution_duration_seconds_count{extension_point="Filter",plugin="NodeResourcesFit",status="Success"} 1 
scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="1e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="1.5000000000000002e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="2.2500000000000005e-05"} 1 @@ -1204,54 +1222,77 @@ scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plug scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.014778918800354007"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="0.02216837820053101"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="NodeUnschedulable",status="Success",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="Filter",plugin="NodeUnschedulable",status="Success"} 1.257e-06 +scheduler_plugin_execution_duration_seconds_sum{extension_point="Filter",plugin="NodeUnschedulable",status="Success"} 6.49e-07 scheduler_plugin_execution_duration_seconds_count{extension_point="Filter",plugin="NodeUnschedulable",status="Success"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="1e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="1.5000000000000002e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="2.2500000000000005e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="3.375000000000001e-05"} 1 
-scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="5.062500000000001e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="7.593750000000002e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="0.00011390625000000003"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="0.00017085937500000006"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="0.0002562890625000001"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="0.00038443359375000017"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="0.0005766503906250003"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="0.0008649755859375004"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="0.0012974633789062506"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="0.0019461950683593758"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="0.0029192926025390638"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="0.004378938903808595"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="0.006568408355712893"} 1 
-scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="0.009852612533569338"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="0.014778918800354007"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="0.02216837820053101"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="Success",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="Filter",plugin="TaintToleration",status="Success"} 2.868e-06 -scheduler_plugin_execution_duration_seconds_count{extension_point="Filter",plugin="TaintToleration",status="Success"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="1e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="1.5000000000000002e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="2.2500000000000005e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="3.375000000000001e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="5.062500000000001e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="7.593750000000002e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="0.00011390625000000003"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="0.00017085937500000006"} 1 
-scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="0.0002562890625000001"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="0.00038443359375000017"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="0.0005766503906250003"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="0.0008649755859375004"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="0.0012974633789062506"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="0.0019461950683593758"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="0.0029192926025390638"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="0.004378938903808595"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="0.006568408355712893"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="0.009852612533569338"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="0.014778918800354007"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="0.02216837820053101"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreBind",plugin="VolumeBinding",status="Success",le="+Inf"} 1 
-scheduler_plugin_execution_duration_seconds_sum{extension_point="PreBind",plugin="VolumeBinding",status="Success"} 8.74e-07 -scheduler_plugin_execution_duration_seconds_count{extension_point="PreBind",plugin="VolumeBinding",status="Success"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.00038443359375000017"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable"} 7.383e-06 
+scheduler_plugin_execution_duration_seconds_count{extension_point="Filter",plugin="TaintToleration",status="UnschedulableAndUnresolvable"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="1e-05"} 0 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="1.5000000000000002e-05"} 0 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.0005766503906250003"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.0019461950683593758"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable"} 2.1459e-05 +scheduler_plugin_execution_duration_seconds_count{extension_point="PostFilter",plugin="DefaultPreemption",status="Unschedulable"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="1e-05"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="2.2500000000000005e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="3.375000000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="5.062500000000001e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="7.593750000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.00011390625000000003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.00017085937500000006"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.0002562890625000001"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.00038443359375000017"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.0005766503906250003"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.0008649755859375004"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.0012974633789062506"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.0019461950683593758"} 1 
+scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.0029192926025390638"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.004378938903808595"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.006568408355712893"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.009852612533569338"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.014778918800354007"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="0.02216837820053101"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success",le="+Inf"} 1 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success"} 1.268e-06 +scheduler_plugin_execution_duration_seconds_count{extension_point="PreEnqueue",plugin="SchedulingGates",status="Success"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="1e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="1.5000000000000002e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="2.2500000000000005e-05"} 1 @@ -1273,7 +1314,7 @@ scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",p scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.014778918800354007"} 1 
scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="0.02216837820053101"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip"} 9.42e-07 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip"} 3.98e-07 scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="AzureDiskLimits",status="Skip"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="1e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="1.5000000000000002e-05"} 1 @@ -1296,7 +1337,7 @@ scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",p scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.014778918800354007"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="0.02216837820053101"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="EBSLimits",status="Skip",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="EBSLimits",status="Skip"} 1.396e-06 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="EBSLimits",status="Skip"} 1.971e-06 scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="EBSLimits",status="Skip"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="1e-05"} 1 
scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="1.5000000000000002e-05"} 1 @@ -1319,7 +1360,7 @@ scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",p scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.014778918800354007"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="0.02216837820053101"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip"} 6.97e-07 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip"} 1.265e-06 scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="GCEPDLimits",status="Skip"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="1e-05"} 0 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="1.5000000000000002e-05"} 0 @@ -1342,11 +1383,11 @@ scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",p scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.014778918800354007"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="0.02216837820053101"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip"} 2.303e-05 
+scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip"} 2.4171e-05 scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="InterPodAffinity",status="Skip"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="1e-05"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="1.5000000000000002e-05"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="2.2500000000000005e-05"} 0 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="1.5000000000000002e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="2.2500000000000005e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="3.375000000000001e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="5.062500000000001e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="7.593750000000002e-05"} 1 @@ -1365,7 +1406,7 @@ scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",p scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.014778918800354007"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="0.02216837820053101"} 1 
scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeAffinity",status="Success",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="NodeAffinity",status="Success"} 2.6412e-05 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="NodeAffinity",status="Success"} 7.303e-06 scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="NodeAffinity",status="Success"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="1e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="1.5000000000000002e-05"} 1 @@ -1388,9 +1429,9 @@ scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",p scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.014778918800354007"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="0.02216837820053101"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodePorts",status="Skip",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="NodePorts",status="Skip"} 1.366e-06 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="NodePorts",status="Skip"} 1.227e-06 scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="NodePorts",status="Skip"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="1e-05"} 0 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="1e-05"} 1 
scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="1.5000000000000002e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="2.2500000000000005e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="3.375000000000001e-05"} 1 @@ -1411,10 +1452,10 @@ scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",p scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.014778918800354007"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="0.02216837820053101"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success"} 1.2539e-05 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success"} 8.455e-06 scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="NodeResourcesFit",status="Success"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="1e-05"} 0 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="1.5000000000000002e-05"} 0 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="1e-05"} 1 +scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="1.5000000000000002e-05"} 1 
scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="2.2500000000000005e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="3.375000000000001e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="5.062500000000001e-05"} 1 @@ -1434,7 +1475,7 @@ scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",p scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.014778918800354007"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="0.02216837820053101"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip"} 2.178e-05 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip"} 5.827e-06 scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="NodeVolumeLimits",status="Skip"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="1e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="1.5000000000000002e-05"} 1 @@ -1457,7 +1498,7 @@ scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",p scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.014778918800354007"} 1 
scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="0.02216837820053101"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip"} 3.188e-06 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip"} 2.198e-06 scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="PodTopologySpread",status="Skip"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="1e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="1.5000000000000002e-05"} 1 @@ -1480,7 +1521,7 @@ scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",p scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.014778918800354007"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="0.02216837820053101"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeBinding",status="Skip",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="VolumeBinding",status="Skip"} 4.612e-06 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="VolumeBinding",status="Skip"} 3.028e-06 scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="VolumeBinding",status="Skip"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="1e-05"} 1 
scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="1.5000000000000002e-05"} 1 @@ -1503,7 +1544,7 @@ scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",p scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.014778918800354007"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="0.02216837820053101"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip"} 3.713e-06 +scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip"} 3.122e-06 scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="VolumeRestrictions",status="Skip"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="1e-05"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="1.5000000000000002e-05"} 1 @@ -1526,89 +1567,66 @@ scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",p scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.014778918800354007"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="0.02216837820053101"} 1 scheduler_plugin_execution_duration_seconds_bucket{extension_point="PreFilter",plugin="VolumeZone",status="Skip",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="VolumeZone",status="Skip"} 1.696e-06 
+scheduler_plugin_execution_duration_seconds_sum{extension_point="PreFilter",plugin="VolumeZone",status="Skip"} 1.37e-06 scheduler_plugin_execution_duration_seconds_count{extension_point="PreFilter",plugin="VolumeZone",status="Skip"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="1e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="1.5000000000000002e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="2.2500000000000005e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="3.375000000000001e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="5.062500000000001e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="7.593750000000002e-05"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="0.00011390625000000003"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="0.00017085937500000006"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="0.0002562890625000001"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="0.00038443359375000017"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="0.0005766503906250003"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="0.0008649755859375004"} 
1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="0.0012974633789062506"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="0.0019461950683593758"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="0.0029192926025390638"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="0.004378938903808595"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="0.006568408355712893"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="0.009852612533569338"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="0.014778918800354007"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="0.02216837820053101"} 1 -scheduler_plugin_execution_duration_seconds_bucket{extension_point="Reserve",plugin="VolumeBinding",status="Success",le="+Inf"} 1 -scheduler_plugin_execution_duration_seconds_sum{extension_point="Reserve",plugin="VolumeBinding",status="Success"} 1.344e-06 -scheduler_plugin_execution_duration_seconds_count{extension_point="Reserve",plugin="VolumeBinding",status="Success"} 1 # HELP scheduler_pod_scheduling_attempts [STABLE] Number of attempts to successfully schedule a pod. 
# TYPE scheduler_pod_scheduling_attempts histogram -scheduler_pod_scheduling_attempts_bucket{le="1"} 4 -scheduler_pod_scheduling_attempts_bucket{le="2"} 7 -scheduler_pod_scheduling_attempts_bucket{le="4"} 7 -scheduler_pod_scheduling_attempts_bucket{le="8"} 7 -scheduler_pod_scheduling_attempts_bucket{le="16"} 7 -scheduler_pod_scheduling_attempts_bucket{le="+Inf"} 7 -scheduler_pod_scheduling_attempts_sum 10 -scheduler_pod_scheduling_attempts_count 7 -# HELP scheduler_pod_scheduling_duration_seconds [STABLE] E2e latency for a pod being scheduled which may include multiple scheduling attempts. -# TYPE scheduler_pod_scheduling_duration_seconds histogram -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="0.01"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="0.02"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="0.04"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="0.08"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="0.16"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="0.32"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="0.64"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="1.28"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="2.56"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="5.12"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="10.24"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="20.48"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="40.96"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="81.92"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="163.84"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="327.68"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="655.36"} 4 
-scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="1310.72"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="2621.44"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="5242.88"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="1",le="+Inf"} 4 -scheduler_pod_scheduling_duration_seconds_sum{attempts="1"} 0.031305079 -scheduler_pod_scheduling_duration_seconds_count{attempts="1"} 4 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="0.01"} 0 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="0.02"} 0 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="0.04"} 0 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="0.08"} 0 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="0.16"} 0 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="0.32"} 0 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="0.64"} 0 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="1.28"} 0 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="2.56"} 0 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="5.12"} 0 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="10.24"} 3 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="20.48"} 3 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="40.96"} 3 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="81.92"} 3 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="163.84"} 3 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="327.68"} 3 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="655.36"} 3 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="1310.72"} 3 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="2621.44"} 3 -scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="5242.88"} 3 
-scheduler_pod_scheduling_duration_seconds_bucket{attempts="2",le="+Inf"} 3 -scheduler_pod_scheduling_duration_seconds_sum{attempts="2"} 15.507525172000001 -scheduler_pod_scheduling_duration_seconds_count{attempts="2"} 3 +scheduler_pod_scheduling_attempts_bucket{le="1"} 3 +scheduler_pod_scheduling_attempts_bucket{le="2"} 6 +scheduler_pod_scheduling_attempts_bucket{le="4"} 6 +scheduler_pod_scheduling_attempts_bucket{le="8"} 6 +scheduler_pod_scheduling_attempts_bucket{le="16"} 6 +scheduler_pod_scheduling_attempts_bucket{le="+Inf"} 6 +scheduler_pod_scheduling_attempts_sum 9 +scheduler_pod_scheduling_attempts_count 6 +# HELP scheduler_pod_scheduling_sli_duration_seconds [BETA] E2e latency for a pod being scheduled, from the time the pod enters the scheduling queue and might involve multiple scheduling attempts. +# TYPE scheduler_pod_scheduling_sli_duration_seconds histogram +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="0.01"} 1 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="0.02"} 1 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="0.04"} 2 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="0.08"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="0.16"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="0.32"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="0.64"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="1.28"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="2.56"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="5.12"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="10.24"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="20.48"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="40.96"} 3 
+scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="81.92"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="163.84"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="327.68"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="655.36"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="1310.72"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="2621.44"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="5242.88"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="1",le="+Inf"} 3 +scheduler_pod_scheduling_sli_duration_seconds_sum{attempts="1"} 0.080696909 +scheduler_pod_scheduling_sli_duration_seconds_count{attempts="1"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="0.01"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="0.02"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="0.04"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="0.08"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="0.16"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="0.32"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="0.64"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="1.28"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="2.56"} 0 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="5.12"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="10.24"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="20.48"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="40.96"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="81.92"} 3 
+scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="163.84"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="327.68"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="655.36"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="1310.72"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="2621.44"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="5242.88"} 3 +scheduler_pod_scheduling_sli_duration_seconds_bucket{attempts="2",le="+Inf"} 3 +scheduler_pod_scheduling_sli_duration_seconds_sum{attempts="2"} 14.839669345 +scheduler_pod_scheduling_sli_duration_seconds_count{attempts="2"} 3 # HELP scheduler_preemption_attempts_total [STABLE] Total preemption attempts in the cluster till now # TYPE scheduler_preemption_attempts_total counter scheduler_preemption_attempts_total 3 @@ -1627,57 +1645,57 @@ scheduler_preemption_victims_count 0 # HELP scheduler_queue_incoming_pods_total [STABLE] Number of pods added to scheduling queues by event and queue type. # TYPE scheduler_queue_incoming_pods_total counter scheduler_queue_incoming_pods_total{event="NodeTaintChange",queue="active"} 3 -scheduler_queue_incoming_pods_total{event="PodAdd",queue="active"} 7 +scheduler_queue_incoming_pods_total{event="PodAdd",queue="active"} 6 scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="unschedulable"} 3 # HELP scheduler_schedule_attempts_total [STABLE] Number of attempts to schedule pods, by the result. 'unschedulable' means a pod could not be scheduled, while 'error' means an internal scheduler problem. 
# TYPE scheduler_schedule_attempts_total counter -scheduler_schedule_attempts_total{profile="default-scheduler",result="scheduled"} 7 +scheduler_schedule_attempts_total{profile="default-scheduler",result="scheduled"} 6 scheduler_schedule_attempts_total{profile="default-scheduler",result="unschedulable"} 3 # HELP scheduler_scheduler_cache_size [ALPHA] Number of nodes, pods, and assumed (bound) pods in the scheduler cache. # TYPE scheduler_scheduler_cache_size gauge scheduler_scheduler_cache_size{type="assumed_pods"} 0 scheduler_scheduler_cache_size{type="nodes"} 1 -scheduler_scheduler_cache_size{type="pods"} 11 +scheduler_scheduler_cache_size{type="pods"} 10 # HELP scheduler_scheduling_algorithm_duration_seconds [ALPHA] Scheduling algorithm latency in seconds # TYPE scheduler_scheduling_algorithm_duration_seconds histogram -scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.001"} 7 -scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.002"} 7 -scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.004"} 7 -scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.008"} 7 -scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.016"} 7 -scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.032"} 7 -scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.064"} 7 -scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.128"} 7 -scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.256"} 7 -scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.512"} 7 -scheduler_scheduling_algorithm_duration_seconds_bucket{le="1.024"} 7 -scheduler_scheduling_algorithm_duration_seconds_bucket{le="2.048"} 7 -scheduler_scheduling_algorithm_duration_seconds_bucket{le="4.096"} 7 -scheduler_scheduling_algorithm_duration_seconds_bucket{le="8.192"} 7 -scheduler_scheduling_algorithm_duration_seconds_bucket{le="16.384"} 7 -scheduler_scheduling_algorithm_duration_seconds_bucket{le="+Inf"} 7 
-scheduler_scheduling_algorithm_duration_seconds_sum 0.0012906859999999999 -scheduler_scheduling_algorithm_duration_seconds_count 7 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.001"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.002"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.004"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.008"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.016"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.032"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.064"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.128"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.256"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="0.512"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="1.024"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="2.048"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="4.096"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="8.192"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="16.384"} 6 +scheduler_scheduling_algorithm_duration_seconds_bucket{le="+Inf"} 6 +scheduler_scheduling_algorithm_duration_seconds_sum 0.00114044 +scheduler_scheduling_algorithm_duration_seconds_count 6 # HELP scheduler_scheduling_attempt_duration_seconds [STABLE] Scheduling attempt latency in seconds (scheduling algorithm + binding) # TYPE scheduler_scheduling_attempt_duration_seconds histogram scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.001"} 0 scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.002"} 0 scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.004"} 0 
-scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.008"} 3 -scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.016"} 7 -scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.032"} 7 -scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.064"} 7 -scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.128"} 7 -scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.256"} 7 -scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.512"} 7 -scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="1.024"} 7 -scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="2.048"} 7 -scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="4.096"} 7 -scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="8.192"} 7 -scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="16.384"} 7 -scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="+Inf"} 7 -scheduler_scheduling_attempt_duration_seconds_sum{profile="default-scheduler",result="scheduled"} 0.067800619 -scheduler_scheduling_attempt_duration_seconds_count{profile="default-scheduler",result="scheduled"} 7 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.008"} 0 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.016"} 4 
+scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.032"} 5 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.064"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.128"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.256"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="0.512"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="1.024"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="2.048"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="4.096"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="8.192"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="16.384"} 6 +scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="scheduled",le="+Inf"} 6 +scheduler_scheduling_attempt_duration_seconds_sum{profile="default-scheduler",result="scheduled"} 0.116016227 +scheduler_scheduling_attempt_duration_seconds_count{profile="default-scheduler",result="scheduled"} 6 scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="0.001"} 3 scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="0.002"} 3 scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="0.004"} 3 @@ -1694,15 +1712,15 @@ scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler" 
scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="8.192"} 3 scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="16.384"} 3 scheduler_scheduling_attempt_duration_seconds_bucket{profile="default-scheduler",result="unschedulable",le="+Inf"} 3 -scheduler_scheduling_attempt_duration_seconds_sum{profile="default-scheduler",result="unschedulable"} 0.000606865 +scheduler_scheduling_attempt_duration_seconds_sum{profile="default-scheduler",result="unschedulable"} 0.0009928839999999999 scheduler_scheduling_attempt_duration_seconds_count{profile="default-scheduler",result="unschedulable"} 3 # HELP scheduler_unschedulable_pods [ALPHA] The number of unschedulable pods broken down by plugin name. A pod will increment the gauge for all plugins that caused it to not schedule and so this metric have meaning only when broken down by plugin. # TYPE scheduler_unschedulable_pods gauge scheduler_unschedulable_pods{plugin="TaintToleration",profile="default-scheduler"} 0 # HELP workqueue_adds_total [ALPHA] Total number of adds handled by workqueue # TYPE workqueue_adds_total counter -workqueue_adds_total{name="DynamicConfigMapCABundle-client-ca"} 8 -workqueue_adds_total{name="DynamicServingCertificateController"} 8 +workqueue_adds_total{name="DynamicConfigMapCABundle-client-ca"} 42 +workqueue_adds_total{name="DynamicServingCertificateController"} 42 workqueue_adds_total{name="RequestHeaderAuthRequestController"} 0 # HELP workqueue_depth [ALPHA] Current depth of workqueue # TYPE workqueue_depth gauge @@ -1720,28 +1738,28 @@ workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1e-06"} 0 
workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="9.999999999999999e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="9.999999999999999e-05"} 6 -workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.001"} 7 -workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.01"} 7 -workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.1"} 8 -workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1"} 8 -workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="10"} 8 -workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="+Inf"} 8 -workqueue_queue_duration_seconds_sum{name="DynamicConfigMapCABundle-client-ca"} 0.033180206000000004 -workqueue_queue_duration_seconds_count{name="DynamicConfigMapCABundle-client-ca"} 8 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="9.999999999999999e-05"} 39 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.001"} 41 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.01"} 41 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.1"} 41 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1"} 42 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="10"} 42 +workqueue_queue_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="+Inf"} 42 +workqueue_queue_duration_seconds_sum{name="DynamicConfigMapCABundle-client-ca"} 0.101941402 +workqueue_queue_duration_seconds_count{name="DynamicConfigMapCABundle-client-ca"} 42 workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-08"} 0 
workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-06"} 0 workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-06"} 0 -workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-05"} 6 -workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.001"} 8 -workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.01"} 8 -workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.1"} 8 -workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1"} 8 -workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="10"} 8 -workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="+Inf"} 8 -workqueue_queue_duration_seconds_sum{name="DynamicServingCertificateController"} 0.001107015 -workqueue_queue_duration_seconds_count{name="DynamicServingCertificateController"} 8 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-05"} 41 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.001"} 42 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.01"} 42 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.1"} 42 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="1"} 42 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="10"} 42 +workqueue_queue_duration_seconds_bucket{name="DynamicServingCertificateController",le="+Inf"} 42 +workqueue_queue_duration_seconds_sum{name="DynamicServingCertificateController"} 0.001841972 
+workqueue_queue_duration_seconds_count{name="DynamicServingCertificateController"} 42 workqueue_queue_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="1e-08"} 0 workqueue_queue_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="1e-07"} 0 workqueue_queue_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="1e-06"} 0 @@ -1771,28 +1789,28 @@ workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca" workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1e-06"} 0 workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="9.999999999999999e-06"} 1 -workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="9.999999999999999e-05"} 6 -workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.001"} 8 -workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.01"} 8 -workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.1"} 8 -workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1"} 8 -workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="10"} 8 -workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="+Inf"} 8 -workqueue_work_duration_seconds_sum{name="DynamicConfigMapCABundle-client-ca"} 0.00057137 -workqueue_work_duration_seconds_count{name="DynamicConfigMapCABundle-client-ca"} 8 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="9.999999999999999e-05"} 41 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.001"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.01"} 42 
+workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="0.1"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="1"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="10"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicConfigMapCABundle-client-ca",le="+Inf"} 42 +workqueue_work_duration_seconds_sum{name="DynamicConfigMapCABundle-client-ca"} 0.001374066 +workqueue_work_duration_seconds_count{name="DynamicConfigMapCABundle-client-ca"} 42 workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1e-06"} 0 workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-06"} 0 -workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-05"} 6 -workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.001"} 7 -workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.01"} 8 -workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.1"} 8 -workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1"} 8 -workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="10"} 8 -workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="+Inf"} 8 -workqueue_work_duration_seconds_sum{name="DynamicServingCertificateController"} 0.0020430379999999996 -workqueue_work_duration_seconds_count{name="DynamicServingCertificateController"} 8 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="9.999999999999999e-05"} 39 
+workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.001"} 41 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.01"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="0.1"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="1"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="10"} 42 +workqueue_work_duration_seconds_bucket{name="DynamicServingCertificateController",le="+Inf"} 42 +workqueue_work_duration_seconds_sum{name="DynamicServingCertificateController"} 0.0027605059999999994 +workqueue_work_duration_seconds_count{name="DynamicServingCertificateController"} 42 workqueue_work_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="1e-08"} 0 workqueue_work_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="1e-07"} 0 workqueue_work_duration_seconds_bucket{name="RequestHeaderAuthRequestController",le="1e-06"} 0 diff --git a/metricbeat/module/kubernetes/scheduler/_meta/testdata/docs.plain-expected.json b/metricbeat/module/kubernetes/scheduler/_meta/testdata/docs.plain-expected.json index 7f41e8e2c2a8..ea31546dcb08 100644 --- a/metricbeat/module/kubernetes/scheduler/_meta/testdata/docs.plain-expected.json +++ b/metricbeat/module/kubernetes/scheduler/_meta/testdata/docs.plain-expected.json @@ -44,10 +44,14 @@ }, "kubernetes": { "scheduler": { - "leader": { - "is_master": true + "client": { + "request": { + "count": 1305 + } }, - "name": "kube-scheduler" + "code": "200", + "host": "172.18.0.2:6443", + "method": "GET" } }, "metricset": { @@ -67,24 +71,10 @@ }, "kubernetes": { "scheduler": { - "name": "DynamicConfigMapCABundle-client-ca", - "workqueue": { - "adds": { - "count": 8 - }, - "depth": { - "count": 0 - }, - "longestrunning": { - "sec": 0 - }, - "retries": { - "count": 0 - }, - "unfinished": { - "sec": 0 - } - } + "leader": { + 
"is_master": true + }, + "name": "kube-scheduler" } }, "metricset": { @@ -104,48 +94,11 @@ }, "kubernetes": { "scheduler": { - "process": { - "cpu": { - "sec": 3 - }, - "fds": { - "max": { - "count": 1048576 - }, - "open": { - "count": 10 - } - }, - "memory": { - "resident": { - "bytes": 45465600 - }, - "virtual": { - "bytes": 787210240 - } - }, - "started": { - "sec": 1698752384.42 - } - }, + "queue": "unschedulable", "scheduling": { - "preemption": { - "attempts": { - "count": 3 - }, - "victims": { - "bucket": { - "+Inf": 0, - "1": 0, - "16": 0, - "2": 0, - "32": 0, - "4": 0, - "64": 0, - "8": 0 - }, - "count": 0, - "sum": 0 + "pending": { + "pods": { + "count": 0 } } } @@ -168,11 +121,33 @@ }, "kubernetes": { "scheduler": { - "queue": "unschedulable", + "profile": "default-scheduler", + "result": "unschedulable", "scheduling": { - "pending": { - "pods": { - "count": 0 + "attempts": { + "duration": { + "us": { + "bucket": { + "+Inf": 3, + "1000": 3, + "1024000": 3, + "128000": 3, + "16000": 3, + "16384000": 3, + "2000": 3, + "2048000": 3, + "256000": 3, + "32000": 3, + "4000": 3, + "4096000": 3, + "512000": 3, + "64000": 3, + "8000": 3, + "8192000": 3 + }, + "count": 3, + "sum": 992.8839999999999 + } } } } @@ -197,7 +172,7 @@ "scheduler": { "client": { "request": { - "count": 21 + "count": 19 } }, "code": "201", @@ -222,14 +197,14 @@ }, "kubernetes": { "scheduler": { - "client": { - "request": { - "count": 189 + "queue": "backoff", + "scheduling": { + "pending": { + "pods": { + "count": 0 + } } - }, - "code": "200", - "host": "172.18.0.2:6443", - "method": "PUT" + } } }, "metricset": { @@ -259,7 +234,7 @@ "1000000": 3, "15000000": 3, "2000000": 3, - "25000": 3, + "25000": 0, "250000": 3, "30000000": 3, "4000000": 3, @@ -269,7 +244,7 @@ "8000000": 3 }, "count": 3, - "sum": 45567.53399999999 + "sum": 87315.227 } }, "size": { @@ -289,7 +264,7 @@ "65536": 3 }, "count": 3, - "sum": 1029 + "sum": 1026 } } }, @@ -311,7 +286,7 @@ "65536": 3 }, "count": 3, - "sum": 10810 
+ "sum": 10807 } } } @@ -329,82 +304,6 @@ "type": "kubernetes" } }, - { - "event": { - "dataset": "kubernetes.scheduler", - "duration": 115000, - "module": "kubernetes" - }, - "kubernetes": { - "scheduler": { - "profile": "default-scheduler", - "result": "scheduled", - "scheduling": { - "attempts": { - "duration": { - "us": { - "bucket": { - "+Inf": 7, - "1000": 0, - "1024000": 7, - "128000": 7, - "16000": 7, - "16384000": 7, - "2000": 0, - "2048000": 7, - "256000": 7, - "32000": 7, - "4000": 0, - "4096000": 7, - "512000": 7, - "64000": 7, - "8000": 3, - "8192000": 7 - }, - "count": 7, - "sum": 67800.619 - } - } - } - } - } - }, - "metricset": { - "name": "scheduler", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "kubernetes" - } - }, - { - "event": { - "dataset": "kubernetes.scheduler", - "duration": 115000, - "module": "kubernetes" - }, - "kubernetes": { - "scheduler": { - "queue": "backoff", - "scheduling": { - "pending": { - "pods": { - "count": 0 - } - } - } - } - }, - "metricset": { - "name": "scheduler", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "kubernetes" - } - }, { "event": { "dataset": "kubernetes.scheduler", @@ -418,41 +317,41 @@ "duration": { "us": { "bucket": { - "+Inf": 237, - "100000": 236, - "1000000": 236, - "15000000": 237, - "2000000": 236, - "25000": 236, - "250000": 236, - "30000000": 237, - "4000000": 237, - "5000": 226, - "500000": 236, - "60000000": 237, - "8000000": 237 + "+Inf": 1250, + "100000": 1248, + "1000000": 1249, + "15000000": 1250, + "2000000": 1249, + "25000": 1247, + "250000": 1249, + "30000000": 1250, + "4000000": 1250, + "5000": 1036, + "500000": 1249, + "60000000": 1250, + "8000000": 1250 }, - "count": 237, - "sum": 4451666.564999999 + "count": 1250, + "sum": 8867916.397999985 } }, "size": { "bytes": { "bucket": { - "+Inf": 237, - "1024": 237, - "1048576": 237, - "16384": 237, - "16777216": 237, - "256": 237, - "262144": 237, - "4096": 237, - "4194304": 
237, - "512": 237, - "64": 237, - "65536": 237 + "+Inf": 1250, + "1024": 1250, + "1048576": 1250, + "16384": 1250, + "16777216": 1250, + "256": 1250, + "262144": 1250, + "4096": 1250, + "4194304": 1250, + "512": 1250, + "64": 1250, + "65536": 1250 }, - "count": 237, + "count": 1250, "sum": 0 } } @@ -461,21 +360,21 @@ "size": { "bytes": { "bucket": { - "+Inf": 237, - "1024": 233, - "1048576": 237, - "16384": 237, - "16777216": 237, + "+Inf": 1250, + "1024": 1246, + "1048576": 1250, + "16384": 1250, + "16777216": 1250, "256": 35, - "262144": 237, - "4096": 236, - "4194304": 237, - "512": 231, + "262144": 1250, + "4096": 1249, + "4194304": 1250, + "512": 1244, "64": 9, - "65536": 237 + "65536": 1250 }, - "count": 237, - "sum": 103711 + "count": 1250, + "sum": 537154 } } } @@ -520,6 +419,43 @@ "type": "kubernetes" } }, + { + "event": { + "dataset": "kubernetes.scheduler", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "scheduler": { + "name": "DynamicConfigMapCABundle-client-ca", + "workqueue": { + "adds": { + "count": 42 + }, + "depth": { + "count": 0 + }, + "longestrunning": { + "sec": 0 + }, + "retries": { + "count": 0 + }, + "unfinished": { + "sec": 0 + } + } + } + }, + "metricset": { + "name": "scheduler", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, { "event": { "dataset": "kubernetes.scheduler", @@ -533,42 +469,42 @@ "duration": { "us": { "bucket": { - "+Inf": 189, - "100000": 186, - "1000000": 189, - "15000000": 189, - "2000000": 189, - "25000": 182, - "250000": 189, - "30000000": 189, - "4000000": 189, - "5000": 27, - "500000": 189, - "60000000": 189, - "8000000": 189 + "+Inf": 1202, + "100000": 1201, + "1000000": 1202, + "15000000": 1202, + "2000000": 1202, + "25000": 1201, + "250000": 1202, + "30000000": 1202, + "4000000": 1202, + "5000": 6, + "500000": 1202, + "60000000": 1202, + "8000000": 1202 }, - "count": 189, - "sum": 2021037.9790000007 + "count": 1202, + "sum": 
14273085.986000013 } }, "size": { "bytes": { "bucket": { - "+Inf": 189, - "1024": 189, - "1048576": 189, - "16384": 189, - "16777216": 189, + "+Inf": 1202, + "1024": 1202, + "1048576": 1202, + "16384": 1202, + "16777216": 1202, "256": 0, - "262144": 189, - "4096": 189, - "4194304": 189, - "512": 189, + "262144": 1202, + "4096": 1202, + "4194304": 1202, + "512": 1202, "64": 0, - "65536": 189 + "65536": 1202 }, - "count": 189, - "sum": 80519 + "count": 1202, + "sum": 512715 } } }, @@ -576,21 +512,21 @@ "size": { "bytes": { "bucket": { - "+Inf": 189, - "1024": 189, - "1048576": 189, - "16384": 189, - "16777216": 189, + "+Inf": 1202, + "1024": 1202, + "1048576": 1202, + "16384": 1202, + "16777216": 1202, "256": 0, - "262144": 189, - "4096": 189, - "4194304": 189, - "512": 189, + "262144": 1202, + "4096": 1202, + "4194304": 1202, + "512": 1202, "64": 0, - "65536": 189 + "65536": 1202 }, - "count": 189, - "sum": 80520 + "count": 1202, + "sum": 512716 } } } @@ -646,7 +582,7 @@ "name": "DynamicServingCertificateController", "workqueue": { "adds": { - "count": 8 + "count": 42 }, "depth": { "count": 0 @@ -680,75 +616,36 @@ }, "kubernetes": { "scheduler": { - "client": { - "request": { + "profile": "default-scheduler", + "result": "scheduled", + "scheduling": { + "attempts": { "duration": { "us": { "bucket": { - "+Inf": 21, - "100000": 21, - "1000000": 21, - "15000000": 21, - "2000000": 21, - "25000": 21, - "250000": 21, - "30000000": 21, - "4000000": 21, - "5000": 5, - "500000": 21, - "60000000": 21, - "8000000": 21 - }, - "count": 21, - "sum": 146836.43499999997 - } - }, - "size": { - "bytes": { - "bucket": { - "+Inf": 21, - "1024": 20, - "1048576": 21, - "16384": 21, - "16777216": 21, - "256": 8, - "262144": 21, - "4096": 21, - "4194304": 21, - "512": 19, - "64": 0, - "65536": 21 - }, - "count": 21, - "sum": 7220 - } - } - }, - "response": { - "size": { - "bytes": { - "bucket": { - "+Inf": 21, - "1024": 19, - "1048576": 21, - "16384": 21, - "16777216": 21, - "256": 7, - 
"262144": 21, - "4096": 21, - "4194304": 21, - "512": 8, - "64": 7, - "65536": 21 + "+Inf": 6, + "1000": 0, + "1024000": 6, + "128000": 6, + "16000": 4, + "16384000": 6, + "2000": 0, + "2048000": 6, + "256000": 6, + "32000": 5, + "4000": 0, + "4096000": 6, + "512000": 6, + "64000": 6, + "8000": 0, + "8192000": 6 }, - "count": 21, - "sum": 11028 + "count": 6, + "sum": 116016.227 } } } - }, - "host": "172.18.0.2:6443", - "verb": "POST" + } } }, "metricset": { @@ -768,33 +665,48 @@ }, "kubernetes": { "scheduler": { - "profile": "default-scheduler", - "result": "unschedulable", + "process": { + "cpu": { + "sec": 14 + }, + "fds": { + "max": { + "count": 1048576 + }, + "open": { + "count": 10 + } + }, + "memory": { + "resident": { + "bytes": 63909888 + }, + "virtual": { + "bytes": 1316384768 + } + }, + "started": { + "sec": 1704894767.07 + } + }, "scheduling": { - "attempts": { - "duration": { - "us": { - "bucket": { - "+Inf": 3, - "1000": 3, - "1024000": 3, - "128000": 3, - "16000": 3, - "16384000": 3, - "2000": 3, - "2048000": 3, - "256000": 3, - "32000": 3, - "4000": 3, - "4096000": 3, - "512000": 3, - "64000": 3, - "8000": 3, - "8192000": 3 - }, - "count": 3, - "sum": 606.865 - } + "preemption": { + "attempts": { + "count": 3 + }, + "victims": { + "bucket": { + "+Inf": 0, + "1": 0, + "16": 0, + "2": 0, + "32": 0, + "4": 0, + "64": 0, + "8": 0 + }, + "count": 0, + "sum": 0 } } } @@ -819,12 +731,73 @@ "scheduler": { "client": { "request": { - "count": 32 + "duration": { + "us": { + "bucket": { + "+Inf": 19, + "100000": 19, + "1000000": 19, + "15000000": 19, + "2000000": 19, + "25000": 17, + "250000": 19, + "30000000": 19, + "4000000": 19, + "5000": 2, + "500000": 19, + "60000000": 19, + "8000000": 19 + }, + "count": 19, + "sum": 249623.20100000003 + } + }, + "size": { + "bytes": { + "bucket": { + "+Inf": 19, + "1024": 18, + "1048576": 19, + "16384": 19, + "16777216": 19, + "256": 7, + "262144": 19, + "4096": 19, + "4194304": 19, + "512": 17, + "64": 0, + "65536": 19 + 
}, + "count": 19, + "sum": 6726 + } + } + }, + "response": { + "size": { + "bytes": { + "bucket": { + "+Inf": 19, + "1024": 17, + "1048576": 19, + "16384": 19, + "16777216": 19, + "256": 6, + "262144": 19, + "4096": 19, + "4194304": 19, + "512": 7, + "64": 6, + "65536": 19 + }, + "count": 19, + "sum": 10350 + } + } } }, - "code": "403", "host": "172.18.0.2:6443", - "method": "GET" + "verb": "POST" } }, "metricset": { @@ -846,10 +819,10 @@ "scheduler": { "client": { "request": { - "count": 224 + "count": 32 } }, - "code": "200", + "code": "403", "host": "172.18.0.2:6443", "method": "GET" } @@ -916,5 +889,32 @@ "address": "127.0.0.1:55555", "type": "kubernetes" } + }, + { + "event": { + "dataset": "kubernetes.scheduler", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "scheduler": { + "client": { + "request": { + "count": 1202 + } + }, + "code": "200", + "host": "172.18.0.2:6443", + "method": "PUT" + } + }, + "metricset": { + "name": "scheduler", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } } ] \ No newline at end of file diff --git a/metricbeat/module/kubernetes/scheduler/scheduler_test.go b/metricbeat/module/kubernetes/scheduler/scheduler_test.go index da60e1b3d1f8..97c8d003456f 100644 --- a/metricbeat/module/kubernetes/scheduler/scheduler_test.go +++ b/metricbeat/module/kubernetes/scheduler/scheduler_test.go @@ -33,6 +33,7 @@ var files = []string{ "./_meta/test/metrics.1.26", "./_meta/test/metrics.1.27", "./_meta/test/metrics.1.28", + "./_meta/test/metrics.1.29", } func TestEventMapping(t *testing.T) { From 0c387c54828a38ca83afae974c327d69d207c6c7 Mon Sep 17 00:00:00 2001 From: Chris Berkhout Date: Fri, 12 Jan 2024 10:58:33 +0100 Subject: [PATCH 041/129] x-pack/filebeat/input/httpjson: Fix basic auth nil pointer deref (#37591) For chained requests, setting user and password values for basic authentication via a pointer to a requestFactory struct was done before the struct was initialized, 
resulting in a nil pointer dereference and runtime panic. Moving it to after the initialization resolved the issue. --- CHANGELOG.next.asciidoc | 1 + x-pack/filebeat/input/httpjson/request.go | 17 +++-- .../filebeat/input/httpjson/request_test.go | 70 +++++++++++++++++++ 3 files changed, 79 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index f36c7d7b90e8..f805fd49700e 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -61,6 +61,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Filebeat* +- Fix nil pointer dereference in the httpjson input {pull}37591[37591] - [Gcs Input] - Added missing locks for safe concurrency {pull}34914[34914] - Fix the ignore_inactive option being ignored in Filebeat's filestream input {pull}34770[34770] - Fix TestMultiEventForEOFRetryHandlerInput unit test of CometD input {pull}34903[34903] diff --git a/x-pack/filebeat/input/httpjson/request.go b/x-pack/filebeat/input/httpjson/request.go index 248918e81166..b161363dbe77 100644 --- a/x-pack/filebeat/input/httpjson/request.go +++ b/x-pack/filebeat/input/httpjson/request.go @@ -316,13 +316,8 @@ func newRequestFactory(ctx context.Context, config config, log *logp.Logger, met if err != nil { return nil, fmt.Errorf("failed in creating chain http client with error: %w", err) } - if ch.Step.Auth != nil && ch.Step.Auth.Basic.isEnabled() { - rf.user = ch.Step.Auth.Basic.User - rf.password = ch.Step.Auth.Basic.Password - } responseProcessor := newChainResponseProcessor(ch, client, xmlDetails, metrics, log) - rf = &requestFactory{ url: *ch.Step.Request.URL.URL, method: ch.Step.Request.Method, @@ -336,6 +331,10 @@ func newRequestFactory(ctx context.Context, config config, log *logp.Logger, met chainClient: client, chainResponseProcessor: responseProcessor, } + if ch.Step.Auth != nil && ch.Step.Auth.Basic.isEnabled() { + rf.user = ch.Step.Auth.Basic.User + rf.password = ch.Step.Auth.Basic.Password + } } else 
if ch.While != nil { ts, _ := newBasicTransformsFromConfig(registeredTransforms, ch.While.Request.Transforms, requestNamespace, log) policy := newHTTPPolicy(evaluateResponse, ch.While.Until, log) @@ -344,10 +343,6 @@ func newRequestFactory(ctx context.Context, config config, log *logp.Logger, met if err != nil { return nil, fmt.Errorf("failed in creating chain http client with error: %w", err) } - if ch.While.Auth != nil && ch.While.Auth.Basic.isEnabled() { - rf.user = ch.While.Auth.Basic.User - rf.password = ch.While.Auth.Basic.Password - } responseProcessor := newChainResponseProcessor(ch, client, xmlDetails, metrics, log) rf = &requestFactory{ @@ -364,6 +359,10 @@ func newRequestFactory(ctx context.Context, config config, log *logp.Logger, met chainClient: client, chainResponseProcessor: responseProcessor, } + if ch.While.Auth != nil && ch.While.Auth.Basic.isEnabled() { + rf.user = ch.While.Auth.Basic.User + rf.password = ch.While.Auth.Basic.Password + } } rfs = append(rfs, rf) } diff --git a/x-pack/filebeat/input/httpjson/request_test.go b/x-pack/filebeat/input/httpjson/request_test.go index 05fefebef5c9..2bd3aab675a4 100644 --- a/x-pack/filebeat/input/httpjson/request_test.go +++ b/x-pack/filebeat/input/httpjson/request_test.go @@ -135,6 +135,76 @@ func TestCtxAfterDoRequest(t *testing.T) { ) } +func Test_newRequestFactory_UsesBasicAuthInChainedRequests(t *testing.T) { + ctx := context.Background() + log := logp.NewLogger("") + cfg := defaultChainConfig() + + url, _ := url.Parse("https://example.com") + cfg.Request.URL = &urlConfig{ + URL: url, + } + + enabled := true + user := "basicuser" + password := "basicuser" + cfg.Auth = &authConfig{ + Basic: &basicAuthConfig{ + Enabled: &enabled, + User: user, + Password: password, + }, + } + + step := cfg.Chain[0].Step + step.Auth = cfg.Auth + + while := cfg.Chain[0].While + while.Auth = cfg.Auth + + type args struct { + cfg config + step *stepConfig + while *whileConfig + } + tests := []struct { + name string + args 
args + }{ + { + name: "Step", + args: args{ + cfg: cfg, + step: step, + while: nil, + }, + }, + { + name: "While", + args: args{ + cfg: cfg, + step: nil, + while: while, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + tt.args.cfg.Chain[0].Step = tt.args.step + tt.args.cfg.Chain[0].While = tt.args.while + requestFactories, err := newRequestFactory(ctx, tt.args.cfg, log, nil, nil) + assert.NoError(t, err) + assert.NotNil(t, requestFactories) + for _, rf := range requestFactories { + assert.Equal(t, rf.user, user) + assert.Equal(t, rf.password, password) + } + + }) + } +} + func Test_newChainHTTPClient(t *testing.T) { cfg := defaultChainConfig() cfg.Request.URL = &urlConfig{URL: &url.URL{}} From 88b587c8ff0b257d376e6155421abdb765028074 Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Fri, 12 Jan 2024 07:10:00 -0700 Subject: [PATCH 042/129] Disable AWS mage build test (#37621) --- x-pack/metricbeat/Jenkinsfile.yml | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/x-pack/metricbeat/Jenkinsfile.yml b/x-pack/metricbeat/Jenkinsfile.yml index 43b872ef2429..f9fbd76a6ce5 100644 --- a/x-pack/metricbeat/Jenkinsfile.yml +++ b/x-pack/metricbeat/Jenkinsfile.yml @@ -24,19 +24,20 @@ stages: mage: "mage pythonIntegTest" withModule: true stage: mandatory - cloud: - cloud: "mage build test" - withModule: true ## run the ITs only if the changeset affects a specific module. - dirs: ## run the cloud tests for the given modules. - - "x-pack/metricbeat/module/aws" - when: ## Override the top-level when. - parameters: - - "awsCloudTests" - comments: - - "/test x-pack/metricbeat for aws cloud" - labels: - - "aws" - stage: extended + # Skip test until fixed https://github.com/elastic/beats/issues/37498 + #cloud: + # cloud: "mage build test" + # withModule: true ## run the ITs only if the changeset affects a specific module. + # dirs: ## run the cloud tests for the given modules. 
+ # - "x-pack/metricbeat/module/aws" + # when: ## Override the top-level when. + # parameters: + # - "awsCloudTests" + # comments: + # - "/test x-pack/metricbeat for aws cloud" + # labels: + # - "aws" + # stage: extended # Skip test until fixed https://github.com/elastic/beats/issues/36425 #cloudAWS: # cloud: "mage build test goIntegTest" From 410f41682537173050d7fd8c684dde163153481e Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Sun, 14 Jan 2024 13:48:08 -0500 Subject: [PATCH 043/129] chore: Update snapshot.yml (#37626) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 5b4d6f9b20f4..9798bd2a05cb 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-u5089rwg-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-9yo2ylny-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-u5089rwg-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-9yo2ylny-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-u5089rwg-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-9yo2ylny-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 
a9e07e45ec28f6e36e95c2f0d78d5c96410c62be Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Mon, 15 Jan 2024 08:08:44 +1030 Subject: [PATCH 044/129] x-pack/filebeat/input/httpjson: provide an approach to use complete URL replacements (#37486) --- CHANGELOG.next.asciidoc | 1 + .../docs/inputs/input-httpjson.asciidoc | 52 +++++++++++++++++++ x-pack/filebeat/input/httpjson/input_test.go | 38 ++++++++++++++ x-pack/filebeat/input/httpjson/request.go | 19 ++++++- 4 files changed, 109 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index f805fd49700e..2abeafaff675 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -205,6 +205,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - Suppress and log max HTTP request retry errors in CEL input. {pull}37160[37160] - Prevent CEL input from re-entering the eval loop when an evaluation failed. {pull}37161[37161] - Update CEL extensions library to v1.7.0. {pull}37172[37172] +- Add support for complete URL replacement in HTTPJSON chain steps. {pull}37486[37486] *Auditbeat* diff --git a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc b/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc index 33154579a60c..410edf9f9485 100644 --- a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc @@ -1226,6 +1226,30 @@ request_url using file_name as 'file_2': \https://example.com/services/data/v1.0 + Collect and make events from response in any format supported by httpjson for all calls. ++ +Note that since `request.url` must be a valid URL, if an API returns complete URLs in place of an identifier as in the example above, it would not be possible to use the JSON Path syntax. To achieve the desired result in this case an opaque URI syntax can be used. An opaque URI has an arbitrary scheme and opaque text separated by a colon. 
When the replacement is done, the scheme and colon are stripped from the URI prior to the replacement and the remaining opaque text is used as the replacement target. In the following example, the scheme is "placeholder". + +["source","yaml",subs="attributes"] +---- +filebeat.inputs: +- type: httpjson + enabled: true + # first call + request.url: https://example.com/services/data/v1.0/records + interval: 1h + chain: + # second call + - step: + request.url: placeholder:$.records[:] + request.method: GET + replace: $.records[:] + # third call + - step: + request.url: placeholder:$.file_name + request.method: GET + replace: $.file_name +---- + + [[chain-step-replace_with]] [float] @@ -1478,6 +1502,34 @@ response_json using id as '2': + Collect and make events from response in any format supported by httpjson for all calls. ++ +Note that since `request.url` must be a valid URL, if an API returns complete URLs in place of an identifier as in the example above, it would not be possible to use the JSON Path syntax. To achieve the desired result in this case an opaque URI syntax can be used. An opaque URI has an arbitrary scheme and opaque text separated by a colon. When the replacement is done, the scheme and colon are stripped from the URI prior to the replacement and the remaining opaque text is used as the replacement target. In the following example, the scheme is "placeholder". 
+ +["source","yaml",subs="attributes"] +---- +filebeat.inputs: +- type: httpjson + enabled: true + # first call + id: my-httpjson-id + request.url: http://example.com/services/data/v1.0/exports + interval: 1h + chain: + # second call + - while: + request.url: placeholder:$.exportId + request.method: GET + replace: $.exportId + until: '[[ eq .last_response.body.status "completed" ]]' + request.retry.max_attempts: 5 + request.retry.wait_min: 5s + # third call + - step: + request.url: placeholder:$.files[:] + request.method: GET + replace: $.files[:] +---- + NOTE: httpjson chain will only create and ingest events from last call on chained configurations. Also, the current chain only supports the following: all <>, <> and <>. [float] diff --git a/x-pack/filebeat/input/httpjson/input_test.go b/x-pack/filebeat/input/httpjson/input_test.go index de4cc3f11e69..498ccc861834 100644 --- a/x-pack/filebeat/input/httpjson/input_test.go +++ b/x-pack/filebeat/input/httpjson/input_test.go @@ -538,6 +538,25 @@ var testCases = []struct { handler: defaultHandler(http.MethodGet, "", ""), expected: []string{`{"hello":[{"world":"moon"},{"space":[{"cake":"pumpkin"}]}]}`}, }, + { + name: "simple_naked_Chain_GET_request", + setupServer: newNakedChainTestServer(httptest.NewServer), + baseConfig: map[string]interface{}{ + "interval": 10, + "request.method": http.MethodGet, + "chain": []interface{}{ + map[string]interface{}{ + "step": map[string]interface{}{ + "request.url": "placeholder:$.records[:]", + "request.method": http.MethodGet, + "replace": "$.records[:]", + }, + }, + }, + }, + handler: defaultHandler(http.MethodGet, "", ""), + expected: []string{`{"hello":[{"world":"moon"},{"space":[{"cake":"pumpkin"}]}]}`}, + }, { name: "multiple_Chain_GET_request", setupServer: func(t testing.TB, h http.HandlerFunc, config map[string]interface{}) { @@ -1419,6 +1438,25 @@ func newChainTestServer( } } +func newNakedChainTestServer( + newServer func(http.Handler) *httptest.Server, +) func(testing.TB, 
http.HandlerFunc, map[string]interface{}) { + return func(t testing.TB, h http.HandlerFunc, config map[string]interface{}) { + var server *httptest.Server + r := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/": + fmt.Fprintln(w, `{"records":["`+server.URL+`/1"]}`) + case "/1": + fmt.Fprintln(w, `{"hello":[{"world":"moon"},{"space":[{"cake":"pumpkin"}]}]}`) + } + }) + server = httptest.NewServer(r) + config["request.url"] = server.URL + t.Cleanup(server.Close) + } +} + func newChainPaginationTestServer( newServer func(http.Handler) *httptest.Server, ) func(testing.TB, http.HandlerFunc, map[string]interface{}) { diff --git a/x-pack/filebeat/input/httpjson/request.go b/x-pack/filebeat/input/httpjson/request.go index b161363dbe77..5612f2dc6410 100644 --- a/x-pack/filebeat/input/httpjson/request.go +++ b/x-pack/filebeat/input/httpjson/request.go @@ -713,8 +713,25 @@ func (r *requester) processChainPaginationEvents(ctx context.Context, trCtx *tra return n, nil } -// generateNewUrl returns new url value using replacement from oldUrl with ids +// generateNewUrl returns new url value using replacement from oldUrl with ids. +// If oldUrl is an opaque URL, the scheme: is dropped and the remaining string +// is used as the replacement target. For example +// +// placeholder:$.result[:] +// +// becomes +// +// $.result[:] +// +// which is now the replacement target. 
func generateNewUrl(replacement, oldUrl, id string) (url.URL, error) { + u, err := url.Parse(oldUrl) + if err != nil { + return url.URL{}, err + } + if u.Opaque != "" { + oldUrl = u.Opaque + } newUrl, err := url.Parse(strings.Replace(oldUrl, replacement, id, 1)) if err != nil { return url.URL{}, fmt.Errorf("failed to replace value in url: %w", err) From 8aa2a2fd696c3aa216c250c6578d389ab2729abb Mon Sep 17 00:00:00 2001 From: Olga Naydyonock Date: Mon, 15 Jan 2024 11:07:42 +0200 Subject: [PATCH 045/129] added infrastructure for auditbeat (#37625) --- .buildkite/auditbeat/auditbeat-pipeline.yml | 5 + .../deploy/kubernetes/deploy-k8s-pipeline.yml | 5 + .buildkite/heartbeat/heartbeat-pipeline.yml | 5 + .buildkite/pull-requests.json | 48 +++++++ catalog-info.yaml | 135 ++++++++++++++++++ 5 files changed, 198 insertions(+) create mode 100644 .buildkite/auditbeat/auditbeat-pipeline.yml create mode 100644 .buildkite/deploy/kubernetes/deploy-k8s-pipeline.yml create mode 100644 .buildkite/heartbeat/heartbeat-pipeline.yml diff --git a/.buildkite/auditbeat/auditbeat-pipeline.yml b/.buildkite/auditbeat/auditbeat-pipeline.yml new file mode 100644 index 000000000000..34321b61161b --- /dev/null +++ b/.buildkite/auditbeat/auditbeat-pipeline.yml @@ -0,0 +1,5 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json + +steps: + - label: "Example test" + command: echo "Hello!" diff --git a/.buildkite/deploy/kubernetes/deploy-k8s-pipeline.yml b/.buildkite/deploy/kubernetes/deploy-k8s-pipeline.yml new file mode 100644 index 000000000000..34321b61161b --- /dev/null +++ b/.buildkite/deploy/kubernetes/deploy-k8s-pipeline.yml @@ -0,0 +1,5 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json + +steps: + - label: "Example test" + command: echo "Hello!" 
diff --git a/.buildkite/heartbeat/heartbeat-pipeline.yml b/.buildkite/heartbeat/heartbeat-pipeline.yml new file mode 100644 index 000000000000..34321b61161b --- /dev/null +++ b/.buildkite/heartbeat/heartbeat-pipeline.yml @@ -0,0 +1,5 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json + +steps: + - label: "Example test" + command: echo "Hello!" diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index 2cfac0a00b25..43d8974f3bf6 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -47,6 +47,54 @@ "skip_target_branches": [ ], "skip_ci_on_only_changed": [ ], "always_require_ci_on_changed": [ "^metricbeat/.*", ".buildkite/metricbeat/.*", "^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", "^testing/.*"] + }, + { + "enabled": true, + "pipelineSlug": "auditbeat", + "allow_org_users": true, + "allowed_repo_permissions": ["admin", "write"], + "allowed_list": [ ], + "set_commit_status": true, + "build_on_commit": true, + "build_on_comment": true, + "trigger_comment_regex": "^/test auditbeat$", + "always_trigger_comment_regex": "^/test auditbeat$", + "skip_ci_labels": [ ], + "skip_target_branches": [ ], + "skip_ci_on_only_changed": [ ], + "always_require_ci_on_changed": [ "^auditbeat/.*", ".buildkite/auditbeat/.*", "^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", "^testing/.*"] + }, + { + "enabled": true, + "pipelineSlug": "heartbeat", + "allow_org_users": true, + "allowed_repo_permissions": ["admin", "write"], + "allowed_list": [ ], + "set_commit_status": true, + "build_on_commit": true, + "build_on_comment": true, + "trigger_comment_regex": "^/test heartbeat$", + "always_trigger_comment_regex": "^/test heartbeat$", + "skip_ci_labels": [ ], + "skip_target_branches": [ ], + "skip_ci_on_only_changed": [ ], + "always_require_ci_on_changed": [ "^heartbeat/.*", ".buildkite/heartbeat/.*", "^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", 
"^testing/.*"] + }, + { + "enabled": true, + "pipelineSlug": "deploy-k8s", + "allow_org_users": true, + "allowed_repo_permissions": ["admin", "write"], + "allowed_list": [ ], + "set_commit_status": true, + "build_on_commit": true, + "build_on_comment": true, + "trigger_comment_regex": "^/test deploy/kubernetes$", + "always_trigger_comment_regex": "^/test deploy/kubernetes$", + "skip_ci_labels": [ ], + "skip_target_branches": [ ], + "skip_ci_on_only_changed": [ ], + "always_require_ci_on_changed": [ "^deploy/kubernetes/.*", ".buildkite/deploy/kubernetes/.*", "^libbeat/docs/version.asciidoc"] } ] } diff --git a/catalog-info.yaml b/catalog-info.yaml index 3ccb648d0979..92757fd4c134 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -152,3 +152,138 @@ spec: access_level: MANAGE_BUILD_AND_READ everyone: access_level: READ_ONLY + +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-auditbeat + description: "Auditbeat pipeline" + links: + - title: Pipeline + url: https://buildkite.com/elastic/auditbeat + +spec: + type: buildkite-pipeline + owner: group:ingest-fp + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: auditbeat + description: "Auditbeat pipeline" + spec: + # branch_configuration: "main 7.* 8.* v7.* v8.*" TODO: temporarily commented to build PRs from forks + pipeline_file: ".buildkite/auditbeat/auditbeat-pipeline.yml" + # maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline is ready + provider_settings: + build_pull_request_forks: false + build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot + build_tags: true + filter_enabled: true + filter_condition: >- + build.pull_request.id == null || 
(build.creator.name == 'elasticmachine' && build.pull_request.id != null) + repository: elastic/beats + cancel_intermediate_builds: true + cancel_intermediate_builds_branch_filter: "!main !7.* !8.*" + skip_intermediate_builds: true + skip_intermediate_builds_branch_filter: "!main !7.* !8.*" + # env: + # ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is ready + teams: + ingest-fp: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: READ_ONLY + +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-heartbeat + description: "Heartbeat pipeline" + links: + - title: Pipeline + url: https://buildkite.com/elastic/heartbeat + +spec: + type: buildkite-pipeline + owner: group:ingest-fp + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: heartbeat + description: "Heartbeat pipeline" + spec: + # branch_configuration: "main 7.* 8.* v7.* v8.*" TODO: temporarily commented to build PRs from forks + pipeline_file: ".buildkite/heartbeat/heartbeat-pipeline.yml" + # maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline is ready + provider_settings: + build_pull_request_forks: false + build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot + build_tags: true + filter_enabled: true + filter_condition: >- + build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null) + repository: elastic/beats + cancel_intermediate_builds: true + cancel_intermediate_builds_branch_filter: "!main !7.* !8.*" + skip_intermediate_builds: true + skip_intermediate_builds_branch_filter: "!main !7.* !8.*" + # env: + # ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is 
ready + teams: + ingest-fp: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: READ_ONLY + +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-deploy-k8s + description: "Deploy K8S pipeline" + links: + - title: Pipeline + url: https://buildkite.com/elastic/deploy-k8s + +spec: + type: buildkite-pipeline + owner: group:ingest-fp + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: deploy-k8s + description: "Deploy K8S pipeline" + spec: + # branch_configuration: "main 7.* 8.* v7.* v8.*" TODO: temporarily commented to build PRs from forks + pipeline_file: ".buildkite/deploy/kubernetes/deploy-k8s-pipeline.yml" + # maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline is ready + provider_settings: + build_pull_request_forks: false + build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot + build_tags: true + filter_enabled: true + filter_condition: >- + build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null) + repository: elastic/beats + cancel_intermediate_builds: true + cancel_intermediate_builds_branch_filter: "!main !7.* !8.*" + skip_intermediate_builds: true + skip_intermediate_builds_branch_filter: "!main !7.* !8.*" + # env: + # ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is ready + teams: + ingest-fp: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: READ_ONLY From d193b350054dc2ce86258c628f9a73751002e91b Mon Sep 17 00:00:00 2001 From: Kevin Lacabane Date: Mon, 15 Jan 2024 11:07:18 +0100 Subject: [PATCH 046/129] [elasticsearch] remove event.created mapping (#37514) * remove event.created * map cluster_name --- 
metricbeat/docs/fields.asciidoc | 14 +++++++------- metricbeat/module/elasticsearch/_meta/fields.yml | 2 ++ metricbeat/module/elasticsearch/fields.go | 2 +- .../module/elasticsearch/index/_meta/fields.yml | 2 -- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index f4fbe02155c3..c7f85732c9c6 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -28522,6 +28522,13 @@ Elasticsearch module +*`cluster_settings.cluster.metadata.display_name`*:: ++ +-- +type: keyword + +-- + *`index_recovery.shards.start_time_in_millis`*:: + @@ -30663,13 +30670,6 @@ index -*`elasticsearch.index.created`*:: -+ --- -type: long - --- - *`elasticsearch.index.hidden`*:: + -- diff --git a/metricbeat/module/elasticsearch/_meta/fields.yml b/metricbeat/module/elasticsearch/_meta/fields.yml index 381b4ef805e3..0153f712bbf3 100644 --- a/metricbeat/module/elasticsearch/_meta/fields.yml +++ b/metricbeat/module/elasticsearch/_meta/fields.yml @@ -6,6 +6,8 @@ settings: ["ssl", "http"] short_config: false fields: + - name: cluster_settings.cluster.metadata.display_name + type: keyword - name: index_recovery type: group fields: diff --git a/metricbeat/module/elasticsearch/fields.go b/metricbeat/module/elasticsearch/fields.go index 936bc88923a3..38430958f39c 100644 --- a/metricbeat/module/elasticsearch/fields.go +++ b/metricbeat/module/elasticsearch/fields.go @@ -32,5 +32,5 @@ func init() { // AssetElasticsearch returns asset data. // This is the base64 encoded zlib format compressed contents of module/elasticsearch. 
func AssetElasticsearch() string { - return "eJzsXV+P3LiRf/enIPy0AWwd8moEmwNyyZ0P2MUiu7mXw0HhSNXdtCVRpqjxzH36QKSkJiX+laietnf8tDsz+tWviv+KxWLxPfoMzx8QVLjjpOgAs+LyBiFOeAUf0Nu/qj9/+wahErqCkZYT2nxAP75BCCHtb1BNy76CNwgxqAB38AGd8RuEOuCcNOfuA/rft11XvX2H3l44b9/+3/C7C2U8L2hzIucP6ISrbvj+RKAquw9CxHvU4Bo+INKU8JQzKOgjsGfxK4T4cztIYbRvx5+on6qfdxfMyi7rOGY856SGnDR5TaqKdPPfTni4Ilj9aYv5ZWGnTNDJJjoKblZ3duG0PUT2COsQzSnH1QGyr7iT8Fkwx8XnvOOYd9GNhds6O9G+KTdRLKq+48AyITsTPLI14iTrqR1+XxQsgwY/VJBOph15LRs/YlINf3SAdB17kl2RApoO4gcSx7zf1nd0miOBbAE4yRlgE0qZ4bSBEa19y0iN5+knjpiQmC0RVLtuU1ji6t9rs+aOgT6grEAbWm5jOnyYkfU4UNtii+5NXz8A05p37AUbJyDSlKSAdS9XvzV9rzGgfcO139gUsymn9+SRk5xyjRLVuf4AwSO8rpfaJxLYKzl5wStbt8Uk9dNjbZS2ZG5jr2LV+CnvW+si61cnRqVPj3V2Fagu/StaUGcXwG3ed1AOzB6eORxMDGrKnoXUbJCamUWuGA4K3ZxgjZ8UfqYJJH6VFJLyC+4uaRZ0DpkBcpL2CKwjtEkmaol37eDCJJvnf5MsE6a2JuZ9TxI5ZVx6GwvI5J6NAjR7M6SGjuO6fWODlrBv/33+y7fG7qgwt2GYqY2fzcrSnhWgmj28c29uENv6r3kZ0YDz1/OcTh/kup8N/xVpr7oavlqa6wp5ogwK3PFu/H91wYqSYAfSt7ybPRjN8Qt08czG171fsnOh7zhlkHXk/8E218ct+FKNmVvmw594lLQweQZ7xVtgZ+3hXEPDj5DsgJ6kMzgx6C6+eMB+LsGCrj4AO0/ebX5c5wgUow0j0pyT+YhyTJu8XLduofpNhDOXoAWZ9A6rj5Vb4rxoXhjlvIKbMvQJncktLBs/D37pgT3nBS4uMPqjyfu9IJlFCZrYCeYl5vhYbhFirnPYlx46fgvLRYq6yVwmmUXOYwfP+5O1Iuf8YzwBSSbUC/hGZnip1D3N7mZG9zKzL9iFCLx6aONJ2JrHlv4gZ99j+sP4I5eIBY1j7a3zCTG19FiTGZsDq7t8nKiTRpJ0NUdHO0zcvH2ipOE3ZBcozxWwTsjGDK9sxvJHXPVwQ/tEyLzG9W7av8LEactpmcsxcjuScWKv/uUTlPkD4XkH/HZk48Sq00r+CAWn7MazS7DURSw4r3F7O6YxQnX35CsjHNjtmEZJVc4nbsNuKUgLBxYFy3HPaX6iVUW/bgwMyrPSnJ7yEybVMG4lmu3IMyj8XbBMYZZJ5ExHth0cLvkwqCmHXDt/yceNWFJ6TkFetl1fFNB1p746woIjepgJ5R8By2YL4nK/xWZLzObCpUJA7ZXbeqI4T863nu0ULJv1XmUBIG3rqzbzDnMYYeZ0GsAlsHx7vsUgQ4JkOsiylXfKmI1mljLqca7oA67y4gLFZ+FG7tXJDriQXOOnvIMveUP3ijQgrWyZTs/Zrn5NZ+kJdJ3FOrQdPprmAyh3W9eJNsmkPe84bkrSnFPPRwr0clKyMRDr/UEUBLaFg5T70J9Ow6LRAsN8cJOWW6E4FipoNoOGMLBFw3bIHyAX2R2Gbt62QyvsSlPQ+7oZcCV5Sj1OJ9qKuJItoCGhaBugdiIhB5sIvezI8h3kKqm9YpQZkosViePyCE9QHCJdwTemOV+9scSzzRXZNdnc0vNS5c7DvxOy93eyK2Cg2GE24JCgezswJ7FintmnqAFC781ydkvcheUa4e63qdcopeO6lqix56aWPnZdVbK
2bWhoCRv3Dac1vZhjV3uG6ZaA85xc74oOWFOXxpSiobHqGrPna66+JWPSHWnQe3ISQjJAEUvm2r3rRbp8ImtbGb2IqROw2WdnQrWxtN/Q7uMqE6ILFRmXDuOfhdhs0nY0mrJk2Y4wkDdEp8YLcJmnZylW08RU5aSdnquctXeQTXmPQhwvJOvYrmQDmzbBlpxvSsy5B57Ed8ef3IJK4kxIg3hPMuRRyRe+U+5A9obkBtNFi7CVwp0XspGRGK5bDm7jMzFiGWrIcWZb5aAl6x8B2WIRui6T1zav30qG3p2qus4h3KysllR3p+qaEv+2e8GHJBQlGq+rlJ3tM5w702kTn03z22HZRTYiwao5U2HCjByVKBNL7Aq+tc+jHSkesWw1/BSEI1MSYvmq8CnopmeYgFRohlQsNYGbgmBwFl4sQwmcgmJsIlcsUw0/BeHI/KhYvip8KrpH8UxCMC6JK5amgr6F7HwnUrs6H7+xPxcJF+aqkt3jmJhVVTqDHzZsH75BBdPx8PKfp70/PdbZuciuNsloVWZXfGc0BwWEnyy0vR5pKv5GBzWU/ET8mfaruIOOce+tKjT4ptt1pcGmllXcoWQTSkg1igB1p8IWixoTYXqt+Pjqd8QQclTliGXUAitgy75oTagtQrdF2jmilkMYUnjAjUn3BayLtk/WDyuKyxw/AsPnZajEDewCVwX8cTlmpn+etqNdVrR9NvI7Z1Yc36gtTOx3eAJtjwtHL9pjq77DZ8gb3NCNJy2D0QSBbKSZCcjMenITMhDX3S2NtsWpy7/0lOO8JgVLonJWnLpMYGb9FpWRtkfC7hOpFMv3Nd0bKtyOkx2h5Y5VUDfI8LNsgZ10Hb9qIArL5FNU3ubQbtNggZ1UA+F+zNDO4bedvMLdOhh9xPWwzDFjkjac0Sp39e1gA4xbvxDM0EFZkZpwl4uyhaAAtfoqMfTkBJ6YnpzCY+nN0ShGC+jux+HY7MyNiohhFe/G8YtIu2gp3VdT46GvPiezxZceepPX5bGEoks28MkEzq6gP4NPUHDjpB1LZoLafKpyBnOWwAtZ+Az8bgw8cNlt3+X1nhe3sLypeC82ngpQ77Ry+jPZvWa+HnzehZ3H3+42tDjuuic7y+S2ezGzZBNt5UWC3cZM6hxX+5ZbU0FAO5INDfkzwtygLuAVuDVBAQVtTOx3uadUXlvZM8euKDJ97XCmpjyQY/LnjfOwG9IFi4LTUVAiG64L3wS0c2AKz4EcfUWjv4NxaCpQZbjArkFET6FTed3GVEX1Mzx/pVple8MbKtM//S2VEVdIyaxSDSHtBDJJaZcoqyinlSswjVKNlaZjFiiyXJ5tZD2Eh38/0xLQx/8wylk0fwpJesurwhitLCvudmkC0yxOVug2ynugtALcxMn72CF+AdG24j8kvvj/P5sJVLT4rLsq+ylMoGh8nAXRZqb153X3L9aVI5b90CHzL4x23ftpfDFoK1KIqxVoeW1Hfzpp+ufq4tb6FsjZLZx3Kq+fVnSxBsyhPk9RgQAIQwGQ61ek4XBW9LH7ImIdTeaQLO86e7VZfWy5qhwFtLow6vza0i9wmcNTAa3pbpBEaUTjWT5fXQVF+zy9+T7qIT6F4baySYTR6mhl+Y1Ac+aruP36/eup1MH4/pQ1Dgu1+EW6oeG/BuZuBksRkHj1lBpUybQ7cDA4y2N5LWBCjCkKFixgMQXeoSnMS7yoo5SsH5gC28jjuiK/Lzf8+xnXgOhpZGyRdPVnDVWXHLaJYvITfiJ1X6Nu6DJNAeMB/EBuHqWTqzmyXb5+prN1VahykDY26FRr41tq0omzp1GNJeCcFopjM7fi0HBCGPpK+IXIlnRzc1ZLSc/wKk7yghL9MG05oPzD4FlTwXo2rdTnxGgd3i9F8KojTQH5uBXY4DcHafYbqeEdIg2qu3dISNTZD+LRCXhxgZUSqYdVFPH/FDLQVQYSd6uG4a+b/v6nqmC+tkpfAaSvIWFr0a4IFHv5rQgQdzU
kJ5QpcOZP83DFEMbowf6Igdkk/lDB8olSuxo2BmgR21s9kecng1aRqeV7dHEYvv7h+974Rl0cxPLtTP3rU4WHJcOyTV89WucTq/UwtU/1HfrhzACad+gZhtH+DjEo/2COCZoo27uCJvPn4VMhkYgYcBbUb2YXpnOWyrGOaeV7X+EfL4Z5FnBMJ5r+vw30lblWmHJY3qaZwiLVGIYNF3v1DyTQe6jImTxUEEzAUIhhi/gBJljm+nlW917JvlNSl1T7m6kelZZJnv7LHRvgnFczvEHVdckdFD42DZFyCbd5vHpL4XhHm/mYMLTfmVyf5QgcTRbQHeUTyFE90svmV+F82+3rX0lFkR13xS6nlxPh1UTotbaztJ7DzP7EkiNojS+y++nNa0hY2Zc9XfQnAYyGqQCdKFNEGge9/pi/TiDcN5tPxJ9awp7zcnBs7OkJ3oFrdE3czsk85vXH/30fKhKLdOm/uK2zE+0bu2NnPnu8Ijy1uPgsa/tPPkcCrPHAMhhpbtaGkWL9HnXEFuSvAiHBDgSeoOiHTV7e0ooU6QpEGs7hUbAfznG37D1uNi5GjhwEFdbjF1go+SNscycWF2t3QRS4KaCy9V5f/1XmdMyg4fmg0vrQN46S4cRXBbBfZXFf/FLmEcZdl5E9hwnKcULfNKQ5Zw02hnCcaFrOq3ChjMPEwMKX8HFdkWUCznS+gcZwppToGLniwH/4FMyOR3pm/IL5OINNd2Yo69AFP8LMaQwOisQS0YR9a0mgkYc8yY+4i54x8+WZvT7LXySyGqK7Hr2O+szGsrtVG5zE2OCznVZYO9mOFqIXqyVA/CpVMMDrBG/PaL2QslwF+O1TpHNnsb0n7tgNbHC9p08NUbCwqGJEBOuYjLuPQ2dxpNylT3S/g8Rn72F4dJqyJznKXvcxjeKBNSAjLACPpHCVVg6EuRDurP8SCFOTrtuKo7fpt9QGd2Q8/4PVgUDWissx35dQgfkCUtApm7E4JkrSCVI0l7fsWQRYaMG3CMjYGn2RbCOq6UUghxZnjIAMLqcYgRlV4zQCN7K+ZwRyXMG+CODYGqoe6OtaHPhUfzQiPHFgzQCaDLoGdp42mt6FJQDvLu42BXbWvc6Xihhe690B6tphbPeGt66HmwL+JS16sfYhNX3LvmvevthuoyclRdMMc05OlNWYj/c+DtBm0mfgMeXLCeKDFkKoXYNjfNNozzQoMuveGQRBuB3bsHNjt1MbMBkuD7NSTLBXTJeVXreMLpg72vWkXn+/Kefj0H1SElfRP6ej0Kl7vRjVykH0w7PMfh4t4pjQ0bF7ryP2HDvG2mbjynVxrsv9InuiA7aEB26NE2+H0E123YdtDtPuZ3+/kenXDWEiQxrK76EkRgzzPSJUx4/n1HBJW0bo633ZMQptKz+t/2UMCvoIWn3TFziaTVsk70TWBTXciC5UFdleThMFpLuoDq6wurVsb2BHYDA4cgk6047VZZ3HgpKYejZRoiEtbZXKufC9yOzA2nTivR7663wmj7yO03brl5jxbZ9aEgjiU4lkWvU0W2nVhZA703RHGsFC6IDkyicw5z1uqmjzG+sBkUUys1l2x9ePCezQ+e+TtgLXYmWGm66iyybcPmPb59XAVM/t+THj0tzkopeHY1xTOtkZeCZM0lLGc1yWbF0VIGBoS6CUJa5+E5AyM8s6YqTYC+3MdYB3Cx6Q0WgU9ENB+6pED4A+/jL/kDLxRwMfy/20kWTaJCGVpJ4qZB5ntGcFJGjoEShlQ/8qIN0NPYpN29Cq4BQNPZJM29AqSXtO2CMwcho23CldUXFZPA97bzpgmoqC0B39sYziy/r56fPuXk8aU540bqla5I9XB6uy5ZAx8BXedFb2RXlXxILeZDzatHuPAG4XW0tR9sle9jgI6lahUyezNO8Vhjzc4zgJURYgpzE9YIcHFJXKHgm6j3MqjMCx3mtK3sabW2aJhh/PSbFcVnTAvaYlHU7v1Vl4dRa8/H8XzoL/GT1
XJE8H087gNjkfax1f/ZhXPyZK32/Sj7kDz+M67M7Q8bwlLVSk8T9ZMAdCHkCrqaRNuH/vm8GOqAbOSNEh2oxy0CRnqqOgXfB3hVOsYbKvpCqLdWU/vYQTrgH9GyLldRHV2CTwxzR5P131HqQJJLsF3iHSFFUvLh7jqlIuiYcVLPLPHeFL/c8Gn27kMy2LpDPbTSXiKZy8jQmUIy7idCIVTklMhrIA285an9JxEB28a6HhE5eh/a5WG1bH/nzR2Y1N3ZFH4c9RfgE2/7JDBa5MZtNU6KA67dZgG3d4iuN+DYWOHTrZ7sZw9okCshJ0Iwwj87kF6bvKsWkhqgvOOba7ebHCcVPKOQKfRbkkafKJx7RRke4uevun4ZsfP/yJ4/OPb60kKSuBGaP7KK6bXEBiLVnhtgXM5m3UPKWVcCINMZZtufkU5W3KF5ij/N3rRSepFb052bvKPtEHr2/g2HTVlcmHCE+aSniA+I+GfOkB1RX6RB/sR4jWirGbhP43fZCQZmknyqDAHR+fQ4ypTDO3ES1B5hkmm2WnGybmamgh578l5lje4Uh3DEaaR1yRUhZ523ERfZ4zcgYFZeUWrEW7/zJPQ6JUKDyuIyGqZTJVFeMmaGsK0TBzXyMbUr9OlH1HQMTCjcX9GjEbYA7S6GiY2uX/i6JF8ly7oRw9AGox66A0JA6sZougp88cCiy+P75KdNg7YyOquTnX1V4DtwqGqeJ/fkIfmxONdf63loMOCpYNpIwGQKv5QpaCHfyAFwvt/RfgFg0MtGjeoIM/kBdaHvcmOtT4absKDW1evil+ps37BM0x6fKSLTKrEt4qi7UmO+A9wNPsZg7YUIrnJqTRjDPz/nL+v84FfxF+oD1HgIvLmJDVIGx+mnOf+ye2Z+l2j84UyheuHpPyLoa9ql8EiHWf5MBQgvpRJb19LXdYMHq6BeNUM0Xod0/Yd50g9w3RtdyeSEt34xH8Ema6YvWt8FWOUmTwYOe1+ZsmD8DTsJioYYn7TRy4o+P2b+2gfcphiDhT35t/sSm/QggNIDdXCjlkPfI0mVd15G262yUuJMwPyHdNbyhp8oKWFnB4nZdbdIVb1ay5hS7HlwJI0amVqgK7X2g9strLtYSHbzEJUD6k6st1llWqZtxYtlqj4saiRWmTG8uUpWluLFQruHJj2WrZlBcQfWuZSi2XgyUrQfFBkoilJgsBHBQBnOAtVQb2T6G3GsWFew0paW97DeUqxnRHedmoLaVVusAOrY6xuvuBuySWD66h9RN+Cqk61gL+fDecfwH8OZR0fk/GFsTrMIt7ni28LfF/yIMF597zmfYHbdjuqQlfx8vreEkzXrqePZJHQ97V65B5ec6vQ+bWxG1DRnXxzkVW0KqSu6OUbt4E60pVeekHPHZEA8XS/J3paOwkp3S94rjz+D3Fy5ZY80OoKQqVriqUhOkcMMD/RipA3XPHoXaICTbezQJ9DHbd34qR5XtGP4nAOYpI82XGjS5hd/928k9xSh14Uw/FXVxjgMvsGGgRs02APRsg3VxXtP0hPaGiuMzx4zn74zIRVZdxwdUpP1UUr80xU7TxSLBwtT0uCp71HT5Dtrc4rNmWfqY+tpqMU5d96SnHmTEFPpAxWtws8CK5qIfQVwVChdsOyrwFRmjpHwyB+qBF0ppytfwoEYoEa98JhNdPJI/vRrThjFa5r119qdo6akVqR2rvNkw5NLdjLlNei7bP1kFpRzA6JAjNL8PqkbeUpquYclhuoXyTOoHPzuATFNwzvAK8zxNlBeTibZTfh8JnsGdkfVeKuu98fleqHpvgcV+6Cn/2+1V1mYqWtwy6rmepbzIe49LKHPusoPUDaaDMC0pZSRrMB01wU+ZjYeub5RaIbfskVLbL9ocYtku+qbqq0V9AZ038TRVn0FakwC+g8yT5xu18J6PsOuxv3/KT7BdS+cYNP4nF1c2CkbKv3VCg2Mgd9JjebEd5ES9roSmHQcNxp+53zCuhI+D8TxP
gP8VOF5OmQxiNv0DDL1Qk9dRpy2XADhjPRWESo4MQf13+o4BEa8jrZpZQRrj51Yt4eb+Y4GbHVpRwN0raURI+Q3+jDMETrttqUKjn72vctsvEfy3aQppc+oehT6H46xKQWtytELCrHipe/tjTJQXA2Hl29bHDHjkRhVZIJ8vm+B88kdd9UhlfKwohmLjfWklZAOU3cbUGcwiRzaCihVzaxZXlJu1jGJfxgQRx3Ud0ma+4m4RCiU6M1mHEkr5gEkQLfeTogmUHgidccNThGpBIzkf8ghuj8URpqYLWLebkgVSEP6O2Zy3tbCkAchLKF6U40K5NmKEVfSZTwpL9ytbrj/8VAAD///7fHXM=" + return "eJzsXVuP3biRfvevIPw0AWwt8moEkwWyya4XmMEgM9mXxUJhSzzn0JZEmaTa3fvrA5G6kBKvEqU+9rSfZrpbX31VvBWLxeJ78Bk9fwCogozjgiFIi9sbADjmFfoA3v5V/fnbNwCUiBUUtxyT5gP48Q0AAGh/A2pSdhV6AwBFFYIMfQBX+AYAhjjHzZV9AP/7lrHq7Tvw9sZ5+/b/+t/dCOV5QZoLvn4AF1ix/vsLRlXJPggR70EDa/QBFFXHOKL5iJYNP8hqxGEJOcxKzNoKPuf934tPAeDPLfrQq/mV0FKDw02JnnKKCvKI6LP251dKunb4icpE/ZzdIC1ZxjikPOe4Rjlu8hpXFWbT3454sMJQ/WkL+W1h9kzQyUY6Cm5WM7tw0h4ie4B1iOaEw+oA2TPuKHwSzGHxOWccchbdWLCtswvpmnITxbGfCdmZ4JGtEUdZT23/+6KgGWrgQ4XSybQjr2XDR4ir/o8OkK5jj7IrXKCGofiBxCHvtvUdneZAIFsAjnJ62IRSJjhtYERr31Jcw2n6iSMmJGZLBNWu2xSWuPr32qy5Y6ArU/MM2pByG9P+wwyvx4HaFlt0b7r6AVHz6rNpAsJNiQu07uXqt6bvNQaka7j2G5tiNuX0njxwklOuUaI61x8geIDX9VL7RAJ7JScveGXrthilfnqsjdKWzG3sVawaPuVda11k/erEqPTpsc5mgerSv6KF6uyGYJt3DJU9s4dnjg4mhmpCn4XUrJeamUWuGPYKnU6whk8KP9MEEr9KCkn5DbJbmgWdo8wAOUp7RJRh0iQTtcSbO7gwyeb53yTLhKmtiXnX4UROGZfexgIyuWejAE3eDK4R47Bu39igJezbf5/+8q2xOyrMbRhmaljfTjHS0QKpZg/v3JsbxLb+a15GNOD09TSnkwe57mf9f0Xaq676r5bmmiEvhKICMs6G/1cXrCgJdiB9y7vZg9Ecv0AXz2x83fvFOxd6xglFGcP/j2xzfdyCL9WYuGU+/JFHSQqTZ7BXvAV20h5da9TwIyQ7oEfpFF0oYjdfPGA/l2BBsw9Ar6N3mx/XOQLFaMMIN9dkPqIc0yYv161bqH4j4cwlaEEmvcPqY+WWOC2aN0o4r9CpDH1CJ3ILy8bPg186RJ/zAhY3NPijyfu9IJlFCRrZCeYiTHsotwgx8xz2pUOMn2G5SFGnzGWSWeQ8dvC8P1orcs4/xhOQZEK9gG9khpdK3dPsbmZ0LzP7gl2IwNlDGw7W1jy29Ac5+x7TH4YfuUQsaBxrb51PiKmlx5rM2BzRmuXDRJ00kqSrOTjaYeKm7RPBDT+RXaA8V8A6IRszvLIZyx9h1aET7RMhc47rndq/wsRpy2mZyzFyHsk4sbN/+YTK/AHznCF+Htk4seq0kj+ighN68uwSLHURC85r2J7HNEao7p58pZgjeh7TKKnK+cQ57JaCtHBgUdAcdpzkF1JV5OvGwKA8K83JJb9AXPXjVqLZjjyDwt8FzRRmmUTOdGTbweGSD0U14SjXzl/yYSOWlJ5TkJct64oCMXbpqiMsOKCHmVD+EaLZZEFY7rfYZInJXLBUCKi9cltPFOfJ+daznYJmk96rLACgbX3VZt5hDiPMlE6DYIlovj3fopc
hQTIdZNnKO2VMRjNLGfS4VuQBVnlxQ8Vn4Ubu1ckOuJBcw6ecoS95Q/aKNCCtbJlOz8mufk0n6Ql0ncQ6tO0/GucDVO62rhNtlEk6zjhsStxcU89HCvRyUrIxEOv9QRQEtoWDlPvQXS79otEiCnnvJi23QnEsVNBsAg1hYIuG7ZDfQy6yOwzdvG37VtiVpqD3dTPgSvKYe5xOtBVxJVtAo4SibYDaiYQcbCL0siPLt5erpPaKUWZILlYkDssjekLFIdIVfGOa8+yNJZ5tZmTXZHOm56XKnYY/E7L3d7IZMFBsPxtwlKB7OzBHsWKe2aeoAULvzXJ2S9yF5Rrh7rep1yil47qWqKHnppY+dF1VsrZtaEiJNu4bLmt6Mceu9gzTLQHnKbneFR2wpi4NKUV9Y9U1pM9zrr4lY9IdadB7chJCMkARS2bu3vUiXT6Rta2MXsTUCdjsszMm2ljab2j3cZUJ0YUKjEuH8c9CbDZqOxhNWbJsRxjAG6JT4wWwzNOzFKtpYqpy0k7PVc7aO8imvEchjheSdWxXsoFNm2BLTjclptwDT+K740/OoJI4E9Ig3pMMeVTyhe+UO5C9IbnBdNEibKVw54VsZCSG65aD2/hMjFiGGnKc2VY5aMn6R0C2WISuy+S1zeu3kqF3p6qucwg3K6sl1d2puqbEv+1e8CEJRYnG6yplZ/sM58502sRn0/x2WHaRjUiwas5UmDAjRyXKxBKbwbf2ebAjxSOWrYafgnBkSkIsXxU+Bd30DBOQCs2QiqUmcFMQDM7Ci2UogVNQjE3kimWq4acgHJkfFctXhU9F9yieSQjGJXHF0lTQt5Cd7kRqV+fjN/bXIuHCXFWyexwTs6pKZ/DDhu3DN6hgOh5e/vO096fHOrsW2WyTjFRlNuM7ozkgIPxkoe31SFPxNzqooeRH4s+kW8UddIx7b1WhwTfdrisNNrWs4g4lm1BCqlEEqDsWtljUmAjTa8XHV78jhpCjKkcsoxbRAm3ZF60JtUXotkg7R9RyCEMKD7gxyb6AddF2yfphRWCZw0dE4XUZKnEDu8BVAX9cjpnxn6ftCMuKtssGftfMiuMbtYWJ/Q5PoO1g4ehFe2zVMXhFeQMbsvGkpTeaIJANNDMBmVlPbkIG4rq7pdG2uLD8S0c4zGtc0CQqZ8WFZQIz67aoDLQ9EnSfSKVYvud0b1TBdpjsMCl3rIK6QfqfZQvspOv4rIEoLJOPUXmbQ7tNgwV2Ug2E+zFBO4ffdvIKd+tg9BHXwzLHjEnScEqq3NW3gw0wbP1CMEMHZYVrzF0uyhaCAtTqq8TQkxN4YnpyCo+lN0WjKCkQux+HY7MzNygihlW8G8dvIu2iJWRfTY2HrvqczBZfOtSZvC6PJRRdsp5PJnB2Bf0p+oQKbpy0Y8mMUJtPVa7InCXwQha+In43Bu657Lbv8nrPi1tY3lS8FxuPBah3Wjn9mexeM88Hn3dh5+G3uw0tjrvuyc4yue1ezCzZRFt5kWC3MZM6h9W+5dZUENCOZEMD/owwN6gLeAVuTVAAQRsT+13uMZXXVvbMsSuKTF87nKkpD+SY/HnjPOyGdMGC4HQUkMiG68I3Ae0cmMJzIEdf0ejvYByaClQZLrBrENFT6FhetzFVUVXfMZH/DE+yjP/0p1kGXCEls0o1hLQTyMSlXaKsopxWrsA0SjVWmo5ZoPByebaR9RDu//1MSgQ+/odRzqL5U0jSW14VRkllWXG3SxOYZnGyQrdR3gMhFYJNnLyPDPAbEm0r/kPii///s5lARYrPuquyn8IICobHWQBpJlp/Xnf/Yl05YtkPHTL/Qglj78fxRVFb4UJcrQDLazv6S0zjP1cXt9a3AM5u4bxTOX9akcUaMIX6PEUFAiAMBUDmr3DD0VXRx+6LiHU0mUOyvOvs1Wb1seWqchTQ6sKo82tLv4Bljp4K1JruBkmURjSe5fPVVVCwz9Ob7qMe4lMYbiubRBitDlaW3wg0Zb6K26/fv55
KHYzvT1njsFCLX6QbGv5rYO5msBQBiVdPqUGVTLsDB4OzPJbXAibEmKJgwQIWU+AdmsK8xIs6Ssn6gSmwDTyuK/D7cv2/n2GNALkMjC2SZn/WUHXJYZsoJj/BJ1x3NWB9l2kKNBzA9+SmUTq6mgPb5etnOltXhSoHaWODjrU2vqUmHTl7GtVYAs5poTg2Uyv2DSeEga+Y37BsSTc3Z7WU9AxncZIXKsEP45YDlX/oPWsiWE+mlfpcKKnD+6UIXjHcFCgftgIb/OYgzX7DNXoHcANq9g4IiTr7Xjy4IF7c0EqJ1MMqivh/ChlglgHE3ap++Oumv/+pKpivrdJXAOk5JGwt2hWBYi+/FQHirobkhDIFzvxpHq4YwhA92B8xMJvEHypYPlFqV8PGACxie6sn8vxkwCoytXyPLg7D1z983xvfqIuDWL6dqX99qWC/ZFi26atH63xitR6m9qmOgR+uFKHmHXhG/Wh/Bygq/2COCZoo27uCJvPn/lMhEYsYcBbUbyYXhjlL5VjHtPK9r/CPF8M8CzimE03/33r6ylwrTNkvb+NMYZFqDMOGi539Awn0HlX4ih8qFEzAUIhhi/geJljm+nlW917JvlNSl1T7m6kelZZJnv7LHRvgnFczvEHVdckdED42DZFyCbd5vHpL4XhHm/mYMLTfmVyf5QgcTBbQHeUTyFE90svmV+F82+3rX0lFkR13xS6nlxPh1UTotbaztJ7DzP7EkiNoDS+y++lNa0hY2Zc9XfQnAQz6qQBcCFVEGge9/pi/TiDcN5tOxJ9aTJ/zsnds7OkJ3oFrdE3czsk05vXH/30fKhKLdOm/sK2zC+kau2NnPnucEZ5aWHyWtf1HnyMB1nBgGYw0NWtDcbF+jzpiC/JXgZBgB4KeUNH1m7y8JRUu0hWINJzDg2A/nEO27D1uNi5GjhwEFdbjF1go+SNsUycWF2t3QRSwKVBl672+/qvM6ZCihue9SutD3zhKhhNfFcB+lcV98UuZRyh3XUb2HCYoxwld0+DmmjXQGMJxomk5r8KFMg4TAwtfwse8IssEnPF8AwzhTCnRMXLFgX//KTI7HumZ8Rvkwww23pkhlIEbfEQTpyE4KBJLRBN2rSWBRh7yJD/iLjpKzZdn9vosf5HIaohuPnod9JmMZXerNjiJscFnO62wdrIdLUQvVkuA+FXqhstyFau3z3bOTcL2TrXDsd/gRY+fGgJaYQHCiGDUMclzH/t2d2TPpc9Zv4McZu+5dnTGsSfPyV7CMY3igeUcIyyAHnHhqpIcCHPD3FnKJRCmxoxtxdHb9Ftqgzsynv/t6UAga/HkmO9LVCHzXaKgAzNjnUuQpBOkaC5vBbMIsNDabRGQseX2ItlGFMaLQA6tsxgBGVwZMQIzqlxpBG5kqc4I5LjaexHAseVQPdDzWhz8tH8kInriiDY9aDLoGtHruGf0LiwBeHdxTSmws+51vlTE8LLtDlDXDmO7N7x1PdwUuy9J0Ym1D6iZWPYN8PbFdhs9KSmaZphzciG0hny4wnGANqM+PY8x9U0Q77UQQu0aHOObRnumQUFW984gCMLt2IYdAbud2oDJcHkulWKCnTFdVnrdMrpg7mjXk3r9/aacj0P3SUlcRf+cDkKn7vViVCtnyg/PMpF5sIhjQgfH7r2O2HPsGGubjSvXxanE9ovsiQ7YEh64NU68HQKn7LoP2xym3c/+fiPTrxvCRIY0VNIDSYwY5ntEqA4fr6nhkraM0Nf7SGMU2lZ+Wv/LKCrII9JKlb7AKWvaencXvK6N4UZ0oarI9sqYICBzRXVwhdWtFXgDOwJFvSOXoDPtWF3WKSkgiaknEyUa0tJWqZwL3+PKDqxNJ97rob9OTfLIY5y0W7+ElG/71JJAEJ8VJDOkx9lKKxQE3EmjO9IIFkJ7JFc+gTmFcVNxmt9ohwBe5CWbZTO+fhdgh85/H7UVuBYrU9iwiiybcPuMbZ9XA7M2t+fHDEtzk4teHo4xZ2fSK+K
ZMElLKM9hWdL1Bf+AoS2BUlar+k1AyiQr64iRYm+EmUv67hbcI4PBKOCHgnRVCR4Q+PjL9ENCxR/1fCxXzQaSaZOEVJJ6qpB5nJGOFihBQw9AKRv6VwHpbuhBbNqGVgWnaOiBZNqGVknac8IeEcWXfsOd0hUV977zsKejA6apKAjd0R8qIr6sn58+7+71pDHlSeOWAkT+eHWwKlsOGQMf1E1nZV+Ud0Us6HnFo0279wjgvNhaigpO9grGQVBnhU6dzNI8PRjyBo/jJERZgJzG9IAdHlBUinQk6D7OqTACx3pFKXkbb26ZJRp8vCbFclnRAfealnQ4vVdn4dVZ8PL/XTgL/hfxXJE8HUw7g9vkfKx1fPVjXv2YKH2/ST/mDjyPedhdEeN5i1tU4cb/+sAUCHlAWnkkbcL9e9f0dgQ14hQXDJBmkANGOWNJBO2uviucYg2TfcVVWayL9OnVmGCNwL8BXM6LqMYmgT+myftp1ruXJpDsFngHcFNUnbhDDKtKue8dVnvIP3eEL/U/G3y6gc+4LGJmtptKxFMDeRsTVA64gJORVDglMRnKWmo7y3ZKx0F0cNaiho9c+vabrdavjt31prMbmprhR+HPEX5DdPolAwWsTGbTVGCouuzWYBt39BTHfQ6FDh062e7GcPYJArISdCP0I/O5RdJ3lWPTQlQXnHNod/NihcOmlHMEvIrKR9LkI49xoyLdXfD2T/03P374E4fXH99aSRJaImqM7oO4bnJDEmvJCrYtgnTaRk1TWokuuMHGCiynT1HepnyBOcrfvV50klrRm5K9q+wTefD6Bo5NV12ZfIjwpKmEB4j/aPCXDoG6Ap/Ig/0I0Vr8dZPQ/yYPEtIs7UIoKiDjw8uGMUVmpjYiJZJ5hslm2fGGibmwWcj5bwk5lHc40h2D4eYRVriU9dp2XESf5oycooLQcgvWot1/maYhUfUTPa4jIaplMlUV4yZoawpRP3PPkQ2pHxMV3AHCYuGG4n6NmA0gR9LooJ/a5f+L+kPyXLshHDwg0ELKUGlIHFjNFkGvmDkUWHx/fMHnsCfDBlRzc64LtwZuFQxTxf/8BD42FxLr/G+t7BwULOtJGQ0AVvOFrOra+wEvFtr7LwRb0DPQonm9Dv5AXmil21N0qOHTdhUa0rx8U/xMmvcJmmPU5SVbZFIlvFUWa012wNN+l8nN7LFRKV6OkEYzzsz7K/P/OtXuBfCBdBwgWNyGhKwGQPMrm/vcP7E9S7d7dKZQvnD1mJR3MewF+iJArPskB4YS1I+qzu1rucOC0eMtGKeaKUK/e8K+6wS5b4iu5fZEWrobj+CXMOMVq2+Fr3KUIoMHO6/Nn5o8gJ76xUQNS9xv4sAdHbd/awftYw5DxJn63vyLTfkVQmgAualSyCHrkafJvKoDb9Odl7iQMD8g3zW9gaTJC1pawOF1Xs7oCmfVrDlDl+NLAaTo1EpVgd2PrR5Z7WUu4eFbTAKUD6n6Ms+yStWMk2WrNSpOFi1Km5wsU5amOVmoVnDlZNlq2ZQXEH22TKWWy8GSlaB4L0nEUpOFAA6KAI7wlioD+6fQs0Zx4V5DStLZHjaZxZjuKC8btSWkShfYIdUxVne/VZfE8sE1tH6CTyFVx1oEP98N518Q/BxKOr8nYwvidZjFPS8Qnkv8H/Jgwbn3fCbdQRu2e2rC1/HyOl7SjBfW0Uf8aMi7eh0yL8/5dcicTdw2ZFQX71pkBakquTtK6eaNsK5UlZd+wGNHNFAszd+ZjsZOcknXK447j99TvGyJNb1pmqJQ6apCSZjOAQP8b7hCgD0zjmqHmGDjnRboo2jX/a0YWb4X8ZMInKKIJF9m3OgSdvdvJ/8Up9SBN/VA3MU1imCZHQMtYrYJsCcDpJvrirY7pCdUBJY5fLxmf1wmouoybrC65JeKwLU5Joo2HgkWrraDRcGzjsEryvYWhzXb0s/Ux1aTcWHZl45wmBlT4AMZg8XNAi+Si3oIfVUgqmDLUJm3iGJ
S+gdDoD5gkbSmXC0/SoQiwdp3AuH1E8njuxFpOCVV7mtXX6q2jlrh2pHauw1TDs3tmMuU16LtsnVQ2hGMDglC81u/euQtIekqphyWWyifl07gs1P0CRXcM7wCvM8LoQXKxdsovw+Fr8iekfVdKeq+8/ldqXpsgsd96Sr82e9X1WUqWt5SxFhHU99kPMallTn2WUHqB9ygMi8IoSVuIO81gU2ZD4WtT8stENv2Uahsl+0PMWyXfKq6qtFfQGdN/KmKU9RWuIAvoPMo+eR2vpNRNg/781t+lP1CKp/c8KNYWJ0WjJR97USBYiN30GN6kx3lRbysRU3ZDxoOmbrfMa+EjoDzP02A/xQ7XYgbBiAYfgH6X6hI6qnTlsuADFGei8IkRgch/rr8RwEJ1pDzZhYTirn51Yt4eb+Y4CbHVpRwN0raURI+A38jFKAnWLdVr1DH39ewbZeJ/1q0BTe59A9Dn0Lx1yXAtbhbIWBXPVS8/LGnSwqAofPs6mOHPXIiCq1gJsvm+B88kdd9UhlfKwohmLjfWklZAOU3cbUGchQim6KKFHJpF1eWm7SPYdyGBxLEdR/RZb5CNgpFJbhQUocRS/qCSRAt8JGDG5QdCD3BggMGawREcj7gN9gYjSdKSxWkbiHHD7jC/Bm0HW0Js6UAyEkoX5TiALs2YYZW9JlMCUt2K1uvP/5XAAAA//8zjyyF" } diff --git a/metricbeat/module/elasticsearch/index/_meta/fields.yml b/metricbeat/module/elasticsearch/index/_meta/fields.yml index b1a4ed1898a3..ac4be51c415f 100644 --- a/metricbeat/module/elasticsearch/index/_meta/fields.yml +++ b/metricbeat/module/elasticsearch/index/_meta/fields.yml @@ -4,8 +4,6 @@ index release: ga fields: - - name: created - type: long - name: hidden type: boolean - name: shards From d4f5d9fd6080d02a7782342546fe6243275427e4 Mon Sep 17 00:00:00 2001 From: Anderson Queiroz Date: Mon, 15 Jan 2024 12:25:24 +0100 Subject: [PATCH 047/129] Disable Go Workspaces when executing 'go list' in packaging process (#37579) running 'go list' with Go worspaces enabled will report all modules listed on go.work, what would make GetModuleName to fail as it requires 'go list' to return only one module, the agent itself. --- dev-tools/mage/gotool/go.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/dev-tools/mage/gotool/go.go b/dev-tools/mage/gotool/go.go index bb7066a4f3e5..e507282c15ce 100644 --- a/dev-tools/mage/gotool/go.go +++ b/dev-tools/mage/gotool/go.go @@ -58,12 +58,19 @@ var Test goTest = runGoTest // GetModuleName returns the name of the module. 
func GetModuleName() (string, error) { - lines, err := getLines(callGo(nil, "list", "-m")) + lines, err := getLines(callGo( + // Disabling the Go workspace prevents 'go list' from listing all + // modules within the workspace. + map[string]string{"GOWORK": "off"}, + "list", + "-m")) if err != nil { return "", err } + if len(lines) != 1 { - return "", fmt.Errorf("unexpected number of lines") + return "", fmt.Errorf("expected 'go list -m' to return 1 line, got %d", + len(lines)) } return lines[0], nil } From 10b1f7a03aa09f13375dfc1cfc785b6e3e51ffc7 Mon Sep 17 00:00:00 2001 From: Andrew Gizas Date: Mon, 15 Jan 2024 16:24:33 +0200 Subject: [PATCH 048/129] Conditions on podupdater function of kubernetes autodiscovery (#37431) * first update for nodePodUpdater with key function * first update for namespacePodUpdater * updating elastic-agent-autodiscovery library to v0.6.7 * updating NOTICE.txt * updating kubernetes_test.go interface functions * updating pod_test.go by removing unused functions * updating receiver name in tests * fixing lint events --------- Co-authored-by: Michal Pristas --- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 4 +- go.mod | 2 +- go.sum | 4 +- .../autodiscover/providers/kubernetes/pod.go | 6 +- .../providers/kubernetes/pod_test.go | 82 ++++++++++++++++--- .../module/kubernetes/util/kubernetes_test.go | 5 ++ 7 files changed, 84 insertions(+), 20 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 2abeafaff675..2114812d5365 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -140,6 +140,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - Upgrade to Go 1.21.6. {pull}37615[37615] - The Elasticsearch output can now configure performance presets with the `preset` configuration field. {pull}37259[37259] - Upgrade to elastic-agent-libs v0.7.3 and golang.org/x/crypto v0.17.0. 
{pull}37544[37544] +- Make more selective the Pod autodiscovery upon node and namespace update events. {issue}37338[37338] {pull}37431[37431] *Auditbeat* diff --git a/NOTICE.txt b/NOTICE.txt index d258999b356b..207dc8035f33 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -12257,11 +12257,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-autodiscover -Version: v0.6.6 +Version: v0.6.7 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.6.6/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.6.7/LICENSE: Apache License Version 2.0, January 2004 diff --git a/go.mod b/go.mod index a9e019c82923..3f326b006a76 100644 --- a/go.mod +++ b/go.mod @@ -200,7 +200,7 @@ require ( github.com/aws/smithy-go v1.13.5 github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5 github.com/elastic/bayeux v1.0.5 - github.com/elastic/elastic-agent-autodiscover v0.6.6 + github.com/elastic/elastic-agent-autodiscover v0.6.7 github.com/elastic/elastic-agent-libs v0.7.3 github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 github.com/elastic/elastic-agent-system-metrics v0.9.1 diff --git a/go.sum b/go.sum index 716e1a1b4117..3c6faaf866ad 100644 --- a/go.sum +++ b/go.sum @@ -658,8 +658,8 @@ github.com/elastic/bayeux v1.0.5 h1:UceFq01ipmT3S8DzFK+uVAkbCdiPR0Bqei8qIGmUeY0= github.com/elastic/bayeux v1.0.5/go.mod h1:CSI4iP7qeo5MMlkznGvYKftp8M7qqP/3nzmVZoXHY68= github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3 h1:lnDkqiRFKm0rxdljqrj3lotWinO9+jFmeDXIC4gvIQs= github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3/go.mod h1:aPqzac6AYkipvp4hufTyMj5PDIphF3+At8zr7r51xjY= -github.com/elastic/elastic-agent-autodiscover v0.6.6 
h1:P1y0dDpbhJc7Uw/xe85irPEad4Vljygc+y4iSxtqW7A= -github.com/elastic/elastic-agent-autodiscover v0.6.6/go.mod h1:chulyCAyZb/njMHgzkhC/yWnt8v/Y6eCRUhmFVnsA5o= +github.com/elastic/elastic-agent-autodiscover v0.6.7 h1:+KVjltN0rPsBrU8b156gV4lOTBgG/vt0efFCFARrf3g= +github.com/elastic/elastic-agent-autodiscover v0.6.7/go.mod h1:hFeFqneS2r4jD0/QzGkrNk0YVdN0JGh7lCWdsH7zcI4= github.com/elastic/elastic-agent-client/v7 v7.6.0 h1:FEn6FjzynW4TIQo5G096Tr7xYK/P5LY9cSS6wRbXZTc= github.com/elastic/elastic-agent-client/v7 v7.6.0/go.mod h1:GlUKrbVd/O1CRAZonpBeN3J0RlVqP6VGcrBjFWca+aM= github.com/elastic/elastic-agent-libs v0.7.3 h1:tc6JDXYR+2XFMHJVv+7+M0OwAbZPxm3caLJEd943dlE= diff --git a/libbeat/autodiscover/providers/kubernetes/pod.go b/libbeat/autodiscover/providers/kubernetes/pod.go index 31c7297a106e..d849039a66e4 100644 --- a/libbeat/autodiscover/providers/kubernetes/pod.go +++ b/libbeat/autodiscover/providers/kubernetes/pod.go @@ -153,12 +153,12 @@ func NewPodEventer(uuid uuid.UUID, cfg *conf.C, client k8s.Interface, publish fu watcher.AddEventHandler(p) if nodeWatcher != nil && (config.Hints.Enabled() || metaConf.Node.Enabled()) { - updater := kubernetes.NewNodePodUpdater(p.unlockedUpdate, watcher.Store(), &p.crossUpdate) + updater := kubernetes.NewNodePodUpdater(p.unlockedUpdate, watcher.Store(), p.nodeWatcher, &p.crossUpdate) nodeWatcher.AddEventHandler(updater) } if namespaceWatcher != nil && (config.Hints.Enabled() || metaConf.Namespace.Enabled()) { - updater := kubernetes.NewNamespacePodUpdater(p.unlockedUpdate, watcher.Store(), &p.crossUpdate) + updater := kubernetes.NewNamespacePodUpdater(p.unlockedUpdate, watcher.Store(), p.namespaceWatcher, &p.crossUpdate) namespaceWatcher.AddEventHandler(updater) } @@ -407,7 +407,7 @@ func (p *pod) containerPodEvents(flag string, pod *kubernetes.Pod, c *kubernetes ports = []kubernetes.ContainerPort{{ContainerPort: 0}} } - var events []bus.Event + events := []bus.Event{} portsMap := mapstr.M{} ShouldPut(meta, "container", cmeta, 
p.logger) diff --git a/libbeat/autodiscover/providers/kubernetes/pod_test.go b/libbeat/autodiscover/providers/kubernetes/pod_test.go index 4704dc6b8c75..1718dbe07529 100644 --- a/libbeat/autodiscover/providers/kubernetes/pod_test.go +++ b/libbeat/autodiscover/providers/kubernetes/pod_test.go @@ -26,9 +26,13 @@ import ( "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" k8sfake "k8s.io/client-go/kubernetes/fake" + interfaces "k8s.io/client-go/kubernetes" + caches "k8s.io/client-go/tools/cache" + "github.com/elastic/beats/v7/libbeat/autodiscover/template" "github.com/elastic/elastic-agent-autodiscover/bus" "github.com/elastic/elastic-agent-autodiscover/kubernetes" @@ -1988,6 +1992,11 @@ func TestNamespacePodUpdater(t *testing.T) { } } + namespace := &kubernetes.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }} + cases := map[string]struct { pods []interface{} expected []interface{} @@ -2014,14 +2023,19 @@ func TestNamespacePodUpdater(t *testing.T) { t.Run(title, func(t *testing.T) { handler := &mockUpdaterHandler{} store := &mockUpdaterStore{objects: c.pods} - updater := kubernetes.NewNamespacePodUpdater(handler.OnUpdate, store, &sync.Mutex{}) - - namespace := &kubernetes.Namespace{ + //We simulate an update on the namespace with the addition of one label + namespace1 := &kubernetes.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - }, - } - updater.OnUpdate(namespace) + Labels: map[string]string{ + "beta.kubernetes.io/arch": "arm64", + }, + }} + + watcher := &mockUpdaterWatcher{cachedObject: namespace} + updater := kubernetes.NewNamespacePodUpdater(handler.OnUpdate, store, watcher, &sync.Mutex{}) + + updater.OnUpdate(namespace1) assert.EqualValues(t, c.expected, handler.objects) }) @@ -2040,8 +2054,15 @@ func TestNodePodUpdater(t *testing.T) { } } + node := &kubernetes.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + 
}, + } + cases := map[string]struct { - pods []interface{} + pods []interface{} + expected []interface{} }{ "no pods": {}, @@ -2066,14 +2087,21 @@ func TestNodePodUpdater(t *testing.T) { t.Run(title, func(t *testing.T) { handler := &mockUpdaterHandler{} store := &mockUpdaterStore{objects: c.pods} - updater := kubernetes.NewNodePodUpdater(handler.OnUpdate, store, &sync.Mutex{}) - node := &kubernetes.Node{ + //We simulate an update on the node with the addition of one label + node1 := &kubernetes.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", - }, - } - updater.OnUpdate(node) + Annotations: map[string]string{ + "beta.kubernetes.io/arch": "arm64", + }, + }} + + watcher := &mockUpdaterWatcher{cachedObject: node} + updater := kubernetes.NewNodePodUpdater(handler.OnUpdate, store, watcher, &sync.Mutex{}) + + //This is when the update happens. + updater.OnUpdate(node1) assert.EqualValues(t, c.expected, handler.objects) }) @@ -2092,6 +2120,36 @@ type mockUpdaterStore struct { objects []interface{} } +var store caches.Store +var client interfaces.Interface +var err error + +type mockUpdaterWatcher struct { + cachedObject runtime.Object +} + +func (s *mockUpdaterWatcher) CachedObject() runtime.Object { + return s.cachedObject +} + +func (s *mockUpdaterWatcher) Client() interfaces.Interface { + return client +} + +func (s *mockUpdaterWatcher) Start() error { + return err +} + +func (s *mockUpdaterWatcher) Stop() { +} + +func (s *mockUpdaterWatcher) Store() caches.Store { + return store +} + +func (s *mockUpdaterWatcher) AddEventHandler(kubernetes.ResourceEventHandler) { +} + func (s *mockUpdaterStore) List() []interface{} { return s.objects } diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 2d0d3e461139..92d60b28b2d2 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -28,6 +28,7 @@ import ( v1 "k8s.io/api/core/v1" 
"k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "github.com/elastic/elastic-agent-autodiscover/kubernetes" @@ -180,3 +181,7 @@ func (m *mockWatcher) Store() cache.Store { func (m *mockWatcher) Client() k8s.Interface { return nil } + +func (m *mockWatcher) CachedObject() runtime.Object { + return nil +} From 3e05c2abf8352858506cb34bf7c79681adcebdc8 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 15 Jan 2024 12:32:40 -0500 Subject: [PATCH 049/129] chore: Update snapshot.yml (#37637) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 9798bd2a05cb..df64c5001258 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-9yo2ylny-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-07re5v7e-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-9yo2ylny-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-07re5v7e-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-9yo2ylny-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-07re5v7e-SNAPSHOT environment: - 
"ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From b3a03e99baf5e8fee6bfb069e458b301b178f3e7 Mon Sep 17 00:00:00 2001 From: Maurizio Branca Date: Tue, 16 Jan 2024 15:10:53 +0100 Subject: [PATCH 050/129] Update CODEOWNERS to update cloud metrics ownership (#37581) * Update CODEOWNER to update cloud metrics ownership Explicitly assign the ownership of AWS and Azure metricsets to the owning team. As a side-effect of a recent change[^1], AWS and Azure metricsets were assigned to the @elastic/elastic-agent-data-plane. Previously, /x-pack/metricbeat/module/ was assigned to @elastic/integrations which allowed our team to work on this module independently. [^1]: https://github.com/elastic/beats/commit/8fe2f53bf1613bf6c0566a7e8e7afda785c51770#diff-3d36a1bf06148bc6ba1ce2ed3d19de32ea708d955fed212c0d27c536f0bd4da7L180 * Add CODEOWNERS entry for GCP metricsets Since we assigned the ownership of the GCP metricsets across three different teams, I am considering adding: - shared ownership for all files at `/x-pack/metricbeat/module/gcp` - specific team ownership for each metricset --- .github/CODEOWNERS | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 61fa4b04bcdc..140ccf9d73f7 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -177,11 +177,34 @@ CHANGELOG* /x-pack/metricbeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module. 
/x-pack/metricbeat/module/activemq @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/airflow @elastic/obs-infraobs-integrations +/x-pack/metricbeat/module/aws @elastic/obs-ds-hosted-services +/x-pack/metricbeat/module/awsfargate @elastic/obs-infraobs-integrations +/x-pack/metricbeat/module/azure @elastic/obs-ds-hosted-services +/x-pack/metricbeat/module/azure/billing @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/cloudfoundry @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/cockroachdb @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/containerd/ @elastic/obs-cloudnative-monitoring /x-pack/metricbeat/module/coredns @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/enterprisesearch @elastic/ent-search-application-backend +/x-pack/metricbeat/module/gcp @elastic/obs-ds-hosted-services @elastic/obs-infraobs-integrations @elastic/security-external-integrations +/x-pack/metricbeat/module/gcp/billing @elastic/obs-infraobs-integrations +/x-pack/metricbeat/module/gcp/cloudrun_metrics @elastic/obs-infraobs-integrations +/x-pack/metricbeat/module/gcp/cloudsql_mysql @elastic/obs-infraobs-integrations +/x-pack/metricbeat/module/gcp/cloudsql_postgressql @elastic/obs-infraobs-integrations +/x-pack/metricbeat/module/gcp/cloudsql_sqlserver @elastic/obs-infraobs-integrations +/x-pack/metricbeat/module/gcp/carbon @elastic/obs-ds-hosted-services +/x-pack/metricbeat/module/gcp/compute @elastic/obs-ds-hosted-services +/x-pack/metricbeat/module/gcp/dataproc @elastic/obs-infraobs-integrations +/x-pack/metricbeat/module/gcp/dns @elastic/security-external-integrations +/x-pack/metricbeat/module/gcp/firestore @elastic/obs-infraobs-integrations +/x-pack/metricbeat/module/gcp/firewall @elastic/security-external-integrations +/x-pack/metricbeat/module/gcp/gke @elastic/obs-ds-hosted-services +/x-pack/metricbeat/module/gcp/loadbalancing_logs @elastic/obs-infraobs-integrations +/x-pack/metricbeat/module/gcp/loadbalancing_metrics 
@elastic/obs-infraobs-integrations +/x-pack/metricbeat/module/gcp/pubsub @elastic/obs-ds-hosted-services +/x-pack/metricbeat/module/gcp/redis @elastic/obs-infraobs-integrations +/x-pack/metricbeat/module/gcp/storage @elastic/obs-ds-hosted-services +/x-pack/metricbeat/module/gcp/vpcflow @elastic/security-external-integrations /x-pack/metricbeat/module/ibmmq @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/iis @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/istio/ @elastic/obs-cloudnative-monitoring From 1554d7d3cd919ccf209e544ddef1789192173d27 Mon Sep 17 00:00:00 2001 From: Maurizio Branca Date: Tue, 16 Jan 2024 17:26:30 +0100 Subject: [PATCH 051/129] Fix value mapping for min and max aggregation type in Azure Monitor (#37643) * Fix value mapping for min and max aggregation type Metric values for `min` and `max` aggregation types were picked up from the wrong place. --------- Co-authored-by: Andrew Gizas --- CHANGELOG.next.asciidoc | 1 + x-pack/metricbeat/module/azure/data.go | 4 +- x-pack/metricbeat/module/azure/data_test.go | 116 ++++++++++++++++++++ 3 files changed, 119 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 2114812d5365..b16313aaadb4 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -104,6 +104,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix EC2 host.cpu.usage {pull}35717[35717] - Add option in SQL module to execute queries for all dbs. {pull}35688[35688] - Add remaining dimensions for azure storage account to make them available for tsdb enablement. 
{pull}36331[36331] +- Fix Azure Resource Metrics missing metrics (min and max aggregations) after upgrade to 8.11.3 {issue}37642[37642] {pull}37643[37643] *Osquerybeat* diff --git a/x-pack/metricbeat/module/azure/data.go b/x-pack/metricbeat/module/azure/data.go index b39a99d480da..c46aee9da246 100644 --- a/x-pack/metricbeat/module/azure/data.go +++ b/x-pack/metricbeat/module/azure/data.go @@ -142,10 +142,10 @@ func mapToKeyValuePoints(metrics []Metric) []KeyValuePoint { switch { case value.min != nil: point.Key = fmt.Sprintf("%s.%s", metricName, "min") - point.Value = value.avg + point.Value = value.min case value.max != nil: point.Key = fmt.Sprintf("%s.%s", metricName, "max") - point.Value = value.avg + point.Value = value.max case value.avg != nil: point.Key = fmt.Sprintf("%s.%s", metricName, "avg") point.Value = value.avg diff --git a/x-pack/metricbeat/module/azure/data_test.go b/x-pack/metricbeat/module/azure/data_test.go index 8ea3144a0a71..85b781ed64ec 100644 --- a/x-pack/metricbeat/module/azure/data_test.go +++ b/x-pack/metricbeat/module/azure/data_test.go @@ -5,7 +5,9 @@ package azure import ( + "fmt" "testing" + "time" "github.com/stretchr/testify/assert" ) @@ -46,3 +48,117 @@ func TestManagePropertyName(t *testing.T) { result = managePropertyName("Percentage CPU") assert.Equal(t, result, "percentage_cpu") } + +func TestMapToKeyValuePoints(t *testing.T) { + timestamp := time.Now().UTC() + metricName := "test" + minValue := 4.0 + maxValue := 42.0 + avgValue := 13.0 + totalValue := 46.0 + countValue := 2.0 + namespace := "test" + resourceId := "test" + resourceSubId := "test" + timeGrain := "PT1M" + + t.Run("test aggregation types", func(t *testing.T) { + + metrics := []Metric{{ + Namespace: namespace, + Names: []string{"test"}, + Aggregations: "min", + Values: []MetricValue{{name: metricName, min: &minValue, timestamp: timestamp}}, + TimeGrain: timeGrain, + ResourceId: resourceId, + ResourceSubId: resourceSubId, + }, { + Namespace: namespace, + Names: 
[]string{"test"}, + Aggregations: "max", + Values: []MetricValue{{name: metricName, max: &maxValue, timestamp: timestamp}}, + TimeGrain: timeGrain, + ResourceId: resourceId, + ResourceSubId: resourceSubId, + }, { + Namespace: namespace, + Names: []string{"test"}, + Aggregations: "avg", + Values: []MetricValue{{name: metricName, avg: &avgValue, timestamp: timestamp}}, + TimeGrain: timeGrain, + ResourceId: resourceId, + ResourceSubId: resourceSubId, + }, { + Namespace: namespace, + Names: []string{"test"}, + Aggregations: "total", + Values: []MetricValue{{name: metricName, total: &totalValue, timestamp: timestamp}}, + TimeGrain: timeGrain, + ResourceId: resourceId, + ResourceSubId: resourceSubId, + }, { + Namespace: namespace, + Names: []string{"test"}, + Aggregations: "count", + Values: []MetricValue{{name: metricName, count: &countValue, timestamp: timestamp}}, + TimeGrain: timeGrain, + ResourceId: resourceId, + ResourceSubId: resourceSubId, + }} + + actual := mapToKeyValuePoints(metrics) + + expected := []KeyValuePoint{ + { + Key: fmt.Sprintf("%s.%s", metricName, "min"), + Value: &minValue, + Namespace: namespace, + TimeGrain: timeGrain, + Timestamp: timestamp, + ResourceId: resourceId, + ResourceSubId: resourceSubId, + Dimensions: map[string]interface{}{}, + }, { + Key: fmt.Sprintf("%s.%s", metricName, "max"), + Value: &maxValue, + Namespace: namespace, + TimeGrain: timeGrain, + Timestamp: timestamp, + ResourceId: resourceId, + ResourceSubId: resourceSubId, + Dimensions: map[string]interface{}{}, + }, { + Key: fmt.Sprintf("%s.%s", metricName, "avg"), + Value: &avgValue, + Namespace: namespace, + TimeGrain: timeGrain, + Timestamp: timestamp, + ResourceId: resourceId, + ResourceSubId: resourceSubId, + Dimensions: map[string]interface{}{}, + }, + { + Key: fmt.Sprintf("%s.%s", metricName, "total"), + Value: &totalValue, + Namespace: namespace, + TimeGrain: timeGrain, + Timestamp: timestamp, + ResourceId: resourceId, + ResourceSubId: resourceSubId, + Dimensions: 
map[string]interface{}{}, + }, + { + Key: fmt.Sprintf("%s.%s", metricName, "count"), + Value: &countValue, + Namespace: namespace, + TimeGrain: timeGrain, + Timestamp: timestamp, + ResourceId: resourceId, + ResourceSubId: resourceSubId, + Dimensions: map[string]interface{}{}, + }, + } + + assert.Equal(t, expected, actual) + }) +} From fb967917ff3be17d4cd10e247d93b73d6b8d346c Mon Sep 17 00:00:00 2001 From: Henrik Nordvik Date: Tue, 16 Jan 2024 10:08:59 -0800 Subject: [PATCH 052/129] Collect total_data_set_size_in_bytes from elasticsearch module (#37457) * Collect total_data_set_size_in_bytes from elasticsearch module Some APIs return total_data_set_size_in_bytes, which is usually the same as size_in_bytes, except when using partially mounted indices/searchable snapshots. For those the size_in_bytes returns zero, while total_data_set_size_in_bytes is the total shard size of the backing index on object storage. We want to collect this to see how much storage each index is using on object storage. Marking it as optional because older versions of ES don't return the field. 
--- metricbeat/docs/fields.asciidoc | 62 ++++++++++++++++++- .../cluster_stats/_meta/data.json | 2 +- .../cluster_stats/_meta/fields.yml | 2 + .../elasticsearch/cluster_stats/data.go | 3 +- metricbeat/module/elasticsearch/fields.go | 2 +- .../elasticsearch/index/_meta/data.json | 8 ++- .../elasticsearch/index/_meta/fields.yml | 7 +++ metricbeat/module/elasticsearch/index/data.go | 6 +- .../index_summary/_meta/fields.yml | 10 +++ .../elasticsearch/index_summary/data.go | 3 + .../elasticsearch/node_stats/_meta/fields.yml | 6 +- .../module/elasticsearch/node_stats/data.go | 3 + 12 files changed, 104 insertions(+), 10 deletions(-) diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index c7f85732c9c6..d96172d0bfca 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -30458,6 +30458,13 @@ type: long -- +*`elasticsearch.cluster.stats.indices.store.total_data_set_size.bytes`*:: ++ +-- +type: long + +-- + *`elasticsearch.cluster.stats.indices.total`*:: + -- @@ -30790,6 +30797,13 @@ type: long -- +*`elasticsearch.index.primaries.store.total_data_set_size_in_bytes`*:: ++ +-- +type: long + +-- + *`elasticsearch.index.primaries.docs.count`*:: + -- @@ -30952,6 +30966,18 @@ type: long Total size of the index in bytes. +type: long + +format: bytes + +-- + +*`elasticsearch.index.total.store.total_data_set_size_in_bytes`*:: ++ +-- +Total size of the index in bytes including backing data for partially mounted indices. + + type: long format: bytes @@ -31482,6 +31508,18 @@ type: long Total size of the index in bytes. +type: long + +format: bytes + +-- + +*`elasticsearch.index.summary.primaries.store.total_data_set_size.bytes`*:: ++ +-- +Total size of the index in bytes including backing data for partially mounted indices. + + type: long format: bytes @@ -31605,6 +31643,18 @@ type: long Total size of the index in bytes. 
+type: long + +format: bytes + +-- + +*`elasticsearch.index.summary.total.store.total_data_set_size.bytes`*:: ++ +-- +Total size of the index in bytes including backing data for partially mounted indices. + + type: long format: bytes @@ -32104,7 +32154,17 @@ format: bytes *`elasticsearch.node.stats.indices.store.size.bytes`*:: + -- -Total size of the store in bytes. +Total size of all shards assigned to this node in bytes. + + +type: long + +-- + +*`elasticsearch.node.stats.indices.store.total_data_set_size.bytes`*:: ++ +-- +Total size of shards in bytes assigned to this node including backing data for partially mounted indices. type: long diff --git a/metricbeat/module/elasticsearch/cluster_stats/_meta/data.json b/metricbeat/module/elasticsearch/cluster_stats/_meta/data.json index 82b913b68071..2c4cc3a185a4 100644 --- a/metricbeat/module/elasticsearch/cluster_stats/_meta/data.json +++ b/metricbeat/module/elasticsearch/cluster_stats/_meta/data.json @@ -143,4 +143,4 @@ "host": { "name": "host.example.com" } - } \ No newline at end of file + } diff --git a/metricbeat/module/elasticsearch/cluster_stats/_meta/fields.yml b/metricbeat/module/elasticsearch/cluster_stats/_meta/fields.yml index b3dc966464e1..9ce475e06b10 100644 --- a/metricbeat/module/elasticsearch/cluster_stats/_meta/fields.yml +++ b/metricbeat/module/elasticsearch/cluster_stats/_meta/fields.yml @@ -55,6 +55,8 @@ fields: - name: store.size.bytes type: long + - name: store.total_data_set_size.bytes + type: long - name: total type: long description: > diff --git a/metricbeat/module/elasticsearch/cluster_stats/data.go b/metricbeat/module/elasticsearch/cluster_stats/data.go index 2853dd3466fa..4fe1d03f6d73 100644 --- a/metricbeat/module/elasticsearch/cluster_stats/data.go +++ b/metricbeat/module/elasticsearch/cluster_stats/data.go @@ -76,7 +76,8 @@ var ( "primaries": c.Int("primaries"), }), "store": c.Dict("store", s.Schema{ - "size": s.Object{"bytes": c.Int("size_in_bytes")}, + "size": s.Object{"bytes": 
c.Int("size_in_bytes")}, + "total_data_set_size": s.Object{"bytes": c.Int("total_data_set_size_in_bytes", s.Optional)}, }), "fielddata": c.Dict("fielddata", s.Schema{ "memory": s.Object{ diff --git a/metricbeat/module/elasticsearch/fields.go b/metricbeat/module/elasticsearch/fields.go index 38430958f39c..357fea6c3152 100644 --- a/metricbeat/module/elasticsearch/fields.go +++ b/metricbeat/module/elasticsearch/fields.go @@ -32,5 +32,5 @@ func init() { // AssetElasticsearch returns asset data. // This is the base64 encoded zlib format compressed contents of module/elasticsearch. func AssetElasticsearch() string { - return "eJzsXVuP3biRfvevIPw0AWwt8moEkwWyya4XmMEgM9mXxUJhSzzn0JZEmaTa3fvrA5G6kBKvEqU+9rSfZrpbX31VvBWLxeJ78Bk9fwCogozjgiFIi9sbADjmFfoA3v5V/fnbNwCUiBUUtxyT5gP48Q0AAGh/A2pSdhV6AwBFFYIMfQBX+AYAhjjHzZV9AP/7lrHq7Tvw9sZ5+/b/+t/dCOV5QZoLvn4AF1ix/vsLRlXJPggR70EDa/QBFFXHOKL5iJYNP8hqxGEJOcxKzNoKPuf934tPAeDPLfrQq/mV0FKDw02JnnKKCvKI6LP251dKunb4icpE/ZzdIC1ZxjikPOe4Rjlu8hpXFWbT3454sMJQ/WkL+W1h9kzQyUY6Cm5WM7tw0h4ie4B1iOaEw+oA2TPuKHwSzGHxOWccchbdWLCtswvpmnITxbGfCdmZ4JGtEUdZT23/+6KgGWrgQ4XSybQjr2XDR4ir/o8OkK5jj7IrXKCGofiBxCHvtvUdneZAIFsAjnJ62IRSJjhtYERr31Jcw2n6iSMmJGZLBNWu2xSWuPr32qy5Y6ArU/MM2pByG9P+wwyvx4HaFlt0b7r6AVHz6rNpAsJNiQu07uXqt6bvNQaka7j2G5tiNuX0njxwklOuUaI61x8geIDX9VL7RAJ7JScveGXrthilfnqsjdKWzG3sVawaPuVda11k/erEqPTpsc5mgerSv6KF6uyGYJt3DJU9s4dnjg4mhmpCn4XUrJeamUWuGPYKnU6whk8KP9MEEr9KCkn5DbJbmgWdo8wAOUp7RJRh0iQTtcSbO7gwyeb53yTLhKmtiXnX4UROGZfexgIyuWejAE3eDK4R47Bu39igJezbf5/+8q2xOyrMbRhmaljfTjHS0QKpZg/v3JsbxLb+a15GNOD09TSnkwe57mf9f0Xaq676r5bmmiEvhKICMs6G/1cXrCgJdiB9y7vZg9Ecv0AXz2x83fvFOxd6xglFGcP/j2xzfdyCL9WYuGU+/JFHSQqTZ7BXvAV20h5da9TwIyQ7oEfpFF0oYjdfPGA/l2BBsw9Ar6N3mx/XOQLFaMMIN9dkPqIc0yYv161bqH4j4cwlaEEmvcPqY+WWOC2aN0o4r9CpDH1CJ3ILy8bPg186RJ/zAhY3NPijyfu9IJlFCRrZCeYiTHsotwgx8xz2pUOMn2G5SFGnzGWSWeQ8dvC8P1orcs4/xhOQZEK9gG9khpdK3dPsbmZ0LzP7gl2IwNlDGw7W1jy29Ac5+x7TH4YfuUQsaBxrb51PiKmlx5rM2BzRmuXDRJ00kqSrOTjaYeKm7RPBDT+RXaA8V8A6IRszvLIZyx9h1aET7RMhc47rndq/wsRpy2mZyzFyHsk4
sbN/+YTK/AHznCF+Htk4seq0kj+ighN68uwSLHURC85r2J7HNEao7p58pZgjeh7TKKnK+cQ57JaCtHBgUdAcdpzkF1JV5OvGwKA8K83JJb9AXPXjVqLZjjyDwt8FzRRmmUTOdGTbweGSD0U14SjXzl/yYSOWlJ5TkJct64oCMXbpqiMsOKCHmVD+EaLZZEFY7rfYZInJXLBUCKi9cltPFOfJ+daznYJmk96rLACgbX3VZt5hDiPMlE6DYIlovj3fopchQTIdZNnKO2VMRjNLGfS4VuQBVnlxQ8Vn4Ubu1ckOuJBcw6ecoS95Q/aKNCCtbJlOz8mufk0n6Ql0ncQ6tO0/GucDVO62rhNtlEk6zjhsStxcU89HCvRyUrIxEOv9QRQEtoWDlPvQXS79otEiCnnvJi23QnEsVNBsAg1hYIuG7ZDfQy6yOwzdvG37VtiVpqD3dTPgSvKYe5xOtBVxJVtAo4SibYDaiYQcbCL0siPLt5erpPaKUWZILlYkDssjekLFIdIVfGOa8+yNJZ5tZmTXZHOm56XKnYY/E7L3d7IZMFBsPxtwlKB7OzBHsWKe2aeoAULvzXJ2S9yF5Rrh7rep1yil47qWqKHnppY+dF1VsrZtaEiJNu4bLmt6Mceu9gzTLQHnKbneFR2wpi4NKUV9Y9U1pM9zrr4lY9IdadB7chJCMkARS2bu3vUiXT6Rta2MXsTUCdjsszMm2ljab2j3cZUJ0YUKjEuH8c9CbDZqOxhNWbJsRxjAG6JT4wWwzNOzFKtpYqpy0k7PVc7aO8imvEchjheSdWxXsoFNm2BLTjclptwDT+K740/OoJI4E9Ig3pMMeVTyhe+UO5C9IbnBdNEibKVw54VsZCSG65aD2/hMjFiGGnKc2VY5aMn6R0C2WISuy+S1zeu3kqF3p6qucwg3K6sl1d2puqbEv+1e8CEJRYnG6yplZ/sM58502sRn0/x2WHaRjUiwas5UmDAjRyXKxBKbwbf2ebAjxSOWrYafgnBkSkIsXxU+Bd30DBOQCs2QiqUmcFMQDM7Ci2UogVNQjE3kimWq4acgHJkfFctXhU9F9yieSQjGJXHF0lTQt5Cd7kRqV+fjN/bXIuHCXFWyexwTs6pKZ/DDhu3DN6hgOh5e/vO096fHOrsW2WyTjFRlNuM7ozkgIPxkoe31SFPxNzqooeRH4s+kW8UddIx7b1WhwTfdrisNNrWs4g4lm1BCqlEEqDsWtljUmAjTa8XHV78jhpCjKkcsoxbRAm3ZF60JtUXotkg7R9RyCEMKD7gxyb6AddF2yfphRWCZw0dE4XUZKnEDu8BVAX9cjpnxn6ftCMuKtssGftfMiuMbtYWJ/Q5PoO1g4ehFe2zVMXhFeQMbsvGkpTeaIJANNDMBmVlPbkIG4rq7pdG2uLD8S0c4zGtc0CQqZ8WFZQIz67aoDLQ9EnSfSKVYvud0b1TBdpjsMCl3rIK6QfqfZQvspOv4rIEoLJOPUXmbQ7tNgwV2Ug2E+zFBO4ffdvIKd+tg9BHXwzLHjEnScEqq3NW3gw0wbP1CMEMHZYVrzF0uyhaCAtTqq8TQkxN4YnpyCo+lN0WjKCkQux+HY7MzNygihlW8G8dvIu2iJWRfTY2HrvqczBZfOtSZvC6PJRRdsp5PJnB2Bf0p+oQKbpy0Y8mMUJtPVa7InCXwQha+In43Bu657Lbv8nrPi1tY3lS8FxuPBah3Wjn9mexeM88Hn3dh5+G3uw0tjrvuyc4yue1ezCzZRFt5kWC3MZM6h9W+5dZUENCOZEMD/owwN6gLeAVuTVAAQRsT+13uMZXXVvbMsSuKTF87nKkpD+SY/HnjPOyGdMGC4HQUkMiG68I3Ae0cmMJzIEdf0ejvYByaClQZLrBrENFT6FhetzFVUVXfMZH/DE+yjP/0p1kGXCEls0o1hLQTyMSlXaKsopxWrsA0SjVWmo5ZoPByebaR9RDu//1MSgQ+/odRzqL5U0jSW14VRkllWXG3SxOY
ZnGyQrdR3gMhFYJNnLyPDPAbEm0r/kPii///s5lARYrPuquyn8IICobHWQBpJlp/Xnf/Yl05YtkPHTL/Qglj78fxRVFb4UJcrQDLazv6S0zjP1cXt9a3AM5u4bxTOX9akcUaMIX6PEUFAiAMBUDmr3DD0VXRx+6LiHU0mUOyvOvs1Wb1seWqchTQ6sKo82tLv4Bljp4K1JruBkmURjSe5fPVVVCwz9Ob7qMe4lMYbiubRBitDlaW3wg0Zb6K26/fv55KHYzvT1njsFCLX6QbGv5rYO5msBQBiVdPqUGVTLsDB4OzPJbXAibEmKJgwQIWU+AdmsK8xIs6Ssn6gSmwDTyuK/D7cv2/n2GNALkMjC2SZn/WUHXJYZsoJj/BJ1x3NWB9l2kKNBzA9+SmUTq6mgPb5etnOltXhSoHaWODjrU2vqUmHTl7GtVYAs5poTg2Uyv2DSeEga+Y37BsSTc3Z7WU9AxncZIXKsEP45YDlX/oPWsiWE+mlfpcKKnD+6UIXjHcFCgftgIb/OYgzX7DNXoHcANq9g4IiTr7Xjy4IF7c0EqJ1MMqivh/ChlglgHE3ap++Oumv/+pKpivrdJXAOk5JGwt2hWBYi+/FQHirobkhDIFzvxpHq4YwhA92B8xMJvEHypYPlFqV8PGACxie6sn8vxkwCoytXyPLg7D1z983xvfqIuDWL6dqX99qWC/ZFi26atH63xitR6m9qmOgR+uFKHmHXhG/Wh/Bygq/2COCZoo27uCJvPn/lMhEYsYcBbUbyYXhjlL5VjHtPK9r/CPF8M8CzimE03/33r6ylwrTNkvb+NMYZFqDMOGi539Awn0HlX4ih8qFEzAUIhhi/geJljm+nlW917JvlNSl1T7m6kelZZJnv7LHRvgnFczvEHVdckdED42DZFyCbd5vHpL4XhHm/mYMLTfmVyf5QgcTBbQHeUTyFE90svmV+F82+3rX0lFkR13xS6nlxPh1UTotbaztJ7DzP7EkiNoDS+y++lNa0hY2Zc9XfQnAQz6qQBcCFVEGge9/pi/TiDcN5tOxJ9aTJ/zsnds7OkJ3oFrdE3czsk05vXH/30fKhKLdOm/sK2zC+kau2NnPnucEZ5aWHyWtf1HnyMB1nBgGYw0NWtDcbF+jzpiC/JXgZBgB4KeUNH1m7y8JRUu0hWINJzDg2A/nEO27D1uNi5GjhwEFdbjF1go+SNsUycWF2t3QRSwKVBl672+/qvM6ZCihue9SutD3zhKhhNfFcB+lcV98UuZRyh3XUb2HCYoxwld0+DmmjXQGMJxomk5r8KFMg4TAwtfwse8IssEnPF8AwzhTCnRMXLFgX//KTI7HumZ8Rvkwww23pkhlIEbfEQTpyE4KBJLRBN2rSWBRh7yJD/iLjpKzZdn9vosf5HIaohuPnod9JmMZXerNjiJscFnO62wdrIdLUQvVkuA+FXqhstyFau3z3bOTcL2TrXDsd/gRY+fGgJaYQHCiGDUMclzH/t2d2TPpc9Zv4McZu+5dnTGsSfPyV7CMY3igeUcIyyAHnHhqpIcCHPD3FnKJRCmxoxtxdHb9Ftqgzsynv/t6UAga/HkmO9LVCHzXaKgAzNjnUuQpBOkaC5vBbMIsNDabRGQseX2ItlGFMaLQA6tsxgBGVwZMQIzqlxpBG5kqc4I5LjaexHAseVQPdDzWhz8tH8kInriiDY9aDLoGtHruGf0LiwBeHdxTSmws+51vlTE8LLtDlDXDmO7N7x1PdwUuy9J0Ym1D6iZWPYN8PbFdhs9KSmaZphzciG0hny4wnGANqM+PY8x9U0Q77UQQu0aHOObRnumQUFW984gCMLt2IYdAbud2oDJcHkulWKCnTFdVnrdMrpg7mjXk3r9/aacj0P3SUlcRf+cDkKn7vViVCtnyg/PMpF5sIhjQgfH7r2O2HPsGGubjSvXxanE9ovsiQ7YEh64NU68HQKn7LoP2xym3c/+fiPTrxvCRIY0VNIDSYwY5ntEqA4fr6nh
kraM0Nf7SGMU2lZ+Wv/LKCrII9JKlb7AKWvaencXvK6N4UZ0oarI9sqYICBzRXVwhdWtFXgDOwJFvSOXoDPtWF3WKSkgiaknEyUa0tJWqZwL3+PKDqxNJ97rob9OTfLIY5y0W7+ElG/71JJAEJ8VJDOkx9lKKxQE3EmjO9IIFkJ7JFc+gTmFcVNxmt9ohwBe5CWbZTO+fhdgh85/H7UVuBYrU9iwiiybcPuMbZ9XA7M2t+fHDEtzk4teHo4xZ2fSK+KZMElLKM9hWdL1Bf+AoS2BUlar+k1AyiQr64iRYm+EmUv67hbcI4PBKOCHgnRVCR4Q+PjL9ENCxR/1fCxXzQaSaZOEVJJ6qpB5nJGOFihBQw9AKRv6VwHpbuhBbNqGVgWnaOiBZNqGVknac8IeEcWXfsOd0hUV977zsKejA6apKAjd0R8qIr6sn58+7+71pDHlSeOWAkT+eHWwKlsOGQMf1E1nZV+Ud0Us6HnFo0279wjgvNhaigpO9grGQVBnhU6dzNI8PRjyBo/jJERZgJzG9IAdHlBUinQk6D7OqTACx3pFKXkbb26ZJRp8vCbFclnRAfealnQ4vVdn4dVZ8PL/XTgL/hfxXJE8HUw7g9vkfKx1fPVjXv2YKH2/ST/mDjyPedhdEeN5i1tU4cb/+sAUCHlAWnkkbcL9e9f0dgQ14hQXDJBmkANGOWNJBO2uviucYg2TfcVVWayL9OnVmGCNwL8BXM6LqMYmgT+myftp1ruXJpDsFngHcFNUnbhDDKtKue8dVnvIP3eEL/U/G3y6gc+4LGJmtptKxFMDeRsTVA64gJORVDglMRnKWmo7y3ZKx0F0cNaiho9c+vabrdavjt31prMbmprhR+HPEX5DdPolAwWsTGbTVGCouuzWYBt39BTHfQ6FDh062e7GcPYJArISdCP0I/O5RdJ3lWPTQlQXnHNod/NihcOmlHMEvIrKR9LkI49xoyLdXfD2T/03P374E4fXH99aSRJaImqM7oO4bnJDEmvJCrYtgnTaRk1TWokuuMHGCiynT1HepnyBOcrfvV50klrRm5K9q+wTefD6Bo5NV12ZfIjwpKmEB4j/aPCXDoG6Ap/Ig/0I0Vr8dZPQ/yYPEtIs7UIoKiDjw8uGMUVmpjYiJZJ5hslm2fGGibmwWcj5bwk5lHc40h2D4eYRVriU9dp2XESf5oycooLQcgvWot1/maYhUfUTPa4jIaplMlUV4yZoawpRP3PPkQ2pHxMV3AHCYuGG4n6NmA0gR9LooJ/a5f+L+kPyXLshHDwg0ELKUGlIHFjNFkGvmDkUWHx/fMHnsCfDBlRzc64LtwZuFQxTxf/8BD42FxLr/G+t7BwULOtJGQ0AVvOFrOra+wEvFtr7LwRb0DPQonm9Dv5AXmil21N0qOHTdhUa0rx8U/xMmvcJmmPU5SVbZFIlvFUWa012wNN+l8nN7LFRKV6OkEYzzsz7K/P/OtXuBfCBdBwgWNyGhKwGQPMrm/vcP7E9S7d7dKZQvnD1mJR3MewF+iJArPskB4YS1I+qzu1rucOC0eMtGKeaKUK/e8K+6wS5b4iu5fZEWrobj+CXMOMVq2+Fr3KUIoMHO6/Nn5o8gJ76xUQNS9xv4sAdHbd/awftYw5DxJn63vyLTfkVQmgAualSyCHrkafJvKoDb9Odl7iQMD8g3zW9gaTJC1pawOF1Xs7oCmfVrDlDl+NLAaTo1EpVgd2PrR5Z7WUu4eFbTAKUD6n6Ms+yStWMk2WrNSpOFi1Km5wsU5amOVmoVnDlZNlq2ZQXEH22TKWWy8GSlaB4L0nEUpOFAA6KAI7wlioD+6fQs0Zx4V5DStLZHjaZxZjuKC8btSWkShfYIdUxVne/VZfE8sE1tH6CTyFVx1oEP98N518Q/BxKOr8nYwvidZjFPS8Qnkv8H/Jgwbn3fCbdQRu2e2rC1/HyOl7SjBfW0Uf8aMi7eh0yL8/5dcicTdw2ZFQX71pkBakquTtK6eaNsK5U
lZd+wGNHNFAszd+ZjsZOcknXK447j99TvGyJNb1pmqJQ6apCSZjOAQP8b7hCgD0zjmqHmGDjnRboo2jX/a0YWb4X8ZMInKKIJF9m3OgSdvdvJ/8Up9SBN/VA3MU1imCZHQMtYrYJsCcDpJvrirY7pCdUBJY5fLxmf1wmouoybrC65JeKwLU5Joo2HgkWrraDRcGzjsEryvYWhzXb0s/Ux1aTcWHZl45wmBlT4AMZg8XNAi+Si3oIfVUgqmDLUJm3iGJS+gdDoD5gkbSmXC0/SoQiwdp3AuH1E8njuxFpOCVV7mtXX6q2jlrh2pHauw1TDs3tmMuU16LtsnVQ2hGMDglC81u/euQtIekqphyWWyifl07gs1P0CRXcM7wCvM8LoQXKxdsovw+Fr8iekfVdKeq+8/ldqXpsgsd96Sr82e9X1WUqWt5SxFhHU99kPMallTn2WUHqB9ygMi8IoSVuIO81gU2ZD4WtT8stENv2Uahsl+0PMWyXfKq6qtFfQGdN/KmKU9RWuIAvoPMo+eR2vpNRNg/781t+lP1CKp/c8KNYWJ0WjJR97USBYiN30GN6kx3lRbysRU3ZDxoOmbrfMa+EjoDzP02A/xQ7XYgbBiAYfgH6X6hI6qnTlsuADFGei8IkRgch/rr8RwEJ1pDzZhYTirn51Yt4eb+Y4CbHVpRwN0raURI+A38jFKAnWLdVr1DH39ewbZeJ/1q0BTe59A9Dn0Lx1yXAtbhbIWBXPVS8/LGnSwqAofPs6mOHPXIiCq1gJsvm+B88kdd9UhlfKwohmLjfWklZAOU3cbUGchQim6KKFHJpF1eWm7SPYdyGBxLEdR/RZb5CNgpFJbhQUocRS/qCSRAt8JGDG5QdCD3BggMGawREcj7gN9gYjSdKSxWkbiHHD7jC/Bm0HW0Js6UAyEkoX5TiALs2YYZW9JlMCUt2K1uvP/5XAAAA//8zjyyF" + return "eJzsfV2P3biR9r1/BeGrCWDrRW6NYPIC2WTXC4wxyEz2ZrFQ2BLPObQlUSapdvf++oVIfZASPyVKfey0b5Lpbj31VPGrWCwW34Mv6PkDQBVkHBcMQVrc3gDAMa/QB/D2r+rP374BoESsoLjlmDQfwM9vAABA+xtQk7Kr0BsAKKoQZOgDuMI3ADDEOW6u7AP477eMVW/fgbc3ztu3/9P/7kYozwvSXPD1A7jAivXfXzCqSvZBiHgPGlijD6CoOsYRzUe0bPhBViMOS8hhVmLWVvA57/9efAoAf27Rh17Nb4SWGhxuSvSUU1SQR0SftT+/UtK1w09UJurn7AZpyTLGIeU5xzXKcZPXuKowm/52xIMVhupPW8hvC7Nngk420lFws5rZhZP2ENkDrEM0JxxWB8iecUfhk2AOiy8545Cz6MaCbZ1dSNeUmyiO/UzIzgSPbI04ynpq+98XBc1QAx8qlE6mHXktGz5CXPV/dIB0HXuUXeECNQzFDyQOebet7+g0BwLZAnCU08MmlDLBaQMjWvuW4hpO008cMSExWyKodt2msMTVv9dmzR0DXZmaZ9CGlNuY9h9meD0O1LbYonvT1Q+ImlefTRMQbkpcoHUvV781fa8xIF3Dtd/YFLMpp/fkgZOcco0S1bn+AMEDvK6X2icS2Cs5ecErW7fFKPXzY22UtmRuY69i1fAp71rrIutXJ0alz491NgtUl/4VLVRnNwTbvGOo7Jk9PHN0MDFUE/ospGa91MwscsWwV+h0gjV8UviZJpD4VVJIym+Q3dIs6BxlBshR2iOiDJMmmagl3tzBhUk2z/8mWSZMbU3Muw4ncsq49DYWkMk9GwVo8mZwjRiHdfvGBi1h3/7/6S/fGrujwtyGYaaG9e0UIx0tkGr28M69uUFs67/mZUQDTl9Pczp5kOt+1v+/SHvVVf/V0lwz5IVQVEDG2fDf6oIVJcEOpG95N3swmuMX6OKZja97v3jnQs84oShj+H+Rba6PW/ClGhO3zIc/8ihJYfIM9oq3
wE7ao2uNGn6EZAf0KJ2iC0Xs5osH7OcSLGj2Aeh19G7z4zpHoBhtGOHmmsxHlGPa5OW6dQvVbyScuQQtyKR3WH2s3BKnRfNGCecVOpWhT+hEbmHZ+Hnwa4foc17A4oYGfzR5vxcksyhBIzvBXIRpD+UWIWaew752iPEzLBcp6pS5TDKLnMcOnvdHa0XO+cd4ApJMqBfwnczwUql7mt3NjO5lZl+wCxE4e2jDwdqax5b+IGffY/rD8COXiAWNY+2t8wkxtfRYkxmbI1qzfJiok0aSdDUHRztM3LR9IrjhJ7ILlOcKWCdkY4ZXNmP5I6w6dKJ9ImTOcb1T+1eYOG05LXM5Rs4jGSd29i+fUJk/YJ4zxM8jGydWnVbyR1RwQk+eXYKlLmLBeQ3b85jGCNXdk28Uc0TPYxolVTmfOIfdUpAWDiwKmsOOk/xCqop82xgYlGelObnkF4irftxKNNuRZ1D4u6CZwiyTyJmObDs4XPKhqCYc5dr5Sz5sxJLScwrysmVdUSDGLl11hAUH9DATyj9CNJssCMv9FpssMZkLlgoBtVdu64niPDnferZT0GzSe5UFALStr9rMO8xhhJnSaRAsEc2351v0MiRIpoMsW3mnjMloZimDHteKPMAqL26o+CLcyL062QEXkmv4lDP0NW/IXpEGpJUt0+k52dWv6SQ9ga6TWIe2/UfjfIDK3dZ1oo0ySccZh02Jm2vq+UiBXk5KNgZivT+IgsC2cJByH7rLpV80WkQh792k5VYojoUKmk2gIQxs0bAd8nvIRXaHoZu3bd8Ku9IU9L5uBlxJHnOP04m2Iq5kC2iUULQNUDuRkINNhF52ZPn2cpXUXjHKDMnFisRheURPqDhEuoJvTHOevbHEs82M7JpszvS8VLnT8GdC9v5ONgMGiu1nA44SdG8H5ihWzDP7FDVA6L1Zzm6Ju7BcI9z9NvUapXRc1xI19NzU0oeuq0rWtg0NKdHGfcNlTS/m2NWeYbol4Dwl17uiA9bUpSGlqG+suob0ec7Vt2RMuiMNek9OQkgGKGLJzN27XqTLJ7K2ldGLmDoBm312xkQbS/sN7T6uMiG6UIFx6TD+WYjNRm0HoylLlu0IA3hDdGq8AJZ5epZiNU1MVU7a6bnKWXsH2ZT3KMTxQrKO7Uo2sGkTbMnppsSUe+BJfHf8yRlUEmdCGsR7kiGPSr7wnXIHsjckN5guWoStFO68kI2MxHDdcnAbn4kRy1BDjjPbKgctWf8IyBaL0HWZvLZ5/VYy9O5U1XUO4WZltaS6O1XXlPi33Qs+JKEo0Xhdpexsn+HcmU6b+Gya3w7LLrIRCVbNmQoTZuSoRJlYYjP41j4PdqR4xLLV8FMQjkxJiOWrwqegm55hAlKhGVKx1ARuCoLBWXixDCVwCoqxiVyxTDX8FIQj86Ni+arwqegexTMJwbgkrliaCvoWstOdSO3qfPzG/lokXJirSnaPY2JWVekMftiwffgGFUzHw8t/nvb+/Fhn1yKbbZKRqsxmfGc0BwSEnyy0vR5pKv5GBzWU/Ej8mXSruIOOce+tKjT4rtt1pcGmllXcoWQTSkg1igB1x8IWixoTYXqt+Pjqd8QQclTliGXUIlqgLfuiNaG2CN0WaeeIWg5hSOEBNybZF7Au2i5ZP6wILHP4iCi8LkMlbmAXuCrgj8sxM/7ztB1hWdF22cDvmllxfKO2MLHf4Qm0HSwcvWiPrToGryhvYEM2nrT0RhMEsoFmJiAz68lNyEBcd7c02hYXln/tCId5jQuaROWsuLBMYGbdFpWBtkeC7hOpFMv3nO6NKtgOkx0m5Y5VUDdI/7NsgZ10HZ81EIVl8jEqb3Not2mwwE6qgXA/Jmjn8NtOXuFuHYw+4npY5pgxSRpOSZW7+nawAYatXwhm6KCscI25y0XZQlCAWn2VGHpyAk9MT07hsfSmaBQlBWL343BsduYGRcSwinfj+E2kXbSE7Kup8dBVX5LZ4muHOpPX5bGEokvW88kE
zq6gP0WfUcGNk3YsmRFq86nKFZmzBF7IwlfE78bAPZfd9l1e73lxC8ubivdi47EA9U4rpz+T3Wvm+eDzLuw8/Ha3ocVx1z3ZWSa33YuZJZtoKy8S7DZmUuew2rfcmgoC2pFsaMCfEeYGdQGvwK0JCiBoY2K/yz2m8trKnjl2RZHpa4czNeWBHJM/b5yH3ZAuWBCcjgIS2XBd+CagnQNTeA7k6Csa/QOMQ1OBKsMFdg0iegody+s2piqq6jsm8p/hSZbxn/40y4ArpGRWqYaQdgKZuLRLlFWU08oVmEapxkrTMQsUXi7PNrIewv2/T6RE4OO/GeUsmj+FJL3lVWGUVJYVd7s0gWkWJyt0G+U9EFIh2MTJ+8gAvyHRtuL/SHzx3382E6hI8UV3VfZTGEHB8DgLIM1E68/r7l+sK0cs+6FD5l8oYez9OL4oaitciKsVYHltR3+Jafzn6uLW+hbA2S2cdyrnTyuyWAOmUJ+nqEAAhKEAyPwVbji6KvrYfRGxjiZzSJZ3nb3arD62XFWOAlpdGHV+bekXsMzRU4Fa090gidKIxrN8vroKCvZ5etN91EN8CsNtZZMIo9XByvIbgabMV3H79cfXU6mD8eMpaxwWavGLdEPDfw3M3QyWIiDx6ik1qJJpd+BgcJbH8lrAhBhTFCxYwGIKvENTmJd4UUcpWT8wBbaBx3UFfl+u//cJ1giQy8DYImn2Zw1Vlxy2iWLyC3zCdVcD1neZpkDDAXxPbhqlo6s5sF2+fqazdVWocpA2NuhYa+N7atKRs6dRjSXgnBaKYzO1Yt9wQhj4hvkNy5Z0c3NWS0nPcBYneaES/DRuOVD5h96zJoL1ZFqpz4WSOrxfiuAVw02B8mErsMFvDtLsd1yjdwA3oGbvgJCos+/FgwvixQ2tlEg9rKKI/7uQAWYZQNyt6oe/bvr7n6qC+doqfQWQnkPC1qJdESj28lsRIO5qSE4oU+DMn+bhiiEM0YP9EQOzSfyhguUTpXY1bAzAIra3eiLPTwasIlPL9+jiMHz9w/e98Y26OIjl25n615cK9kuGZZu+erTOJ1brYWqf6hj46UoRat6BZ9SP9neAovIP5pigibK9K2gyP/WfColYxICzoH4zuTDMWSrHOqaV732Ff7wY5lnAMZ1o+v/e01fmWmHKfnkbZwqLVGMYNlzs7B9IoPeowlf8UKFgAoZCDFvE9zDBMtfPs7r3Svadkrqk2t9M9ai0TPL0X+7YAOe8muENqq5L7oDwsWmIlEu4zePVWwrHO9okgoz09n1HXCbfA2g+dwztyCZfajmkhzYI6N/yTeWoLu5l85vw5u0N5l+aRdUedwkwp9sU4SZF6LW2s7Sew8z+TJUjaA1PvPvpTYtSWB2ZPV30FwEM+rkFXAhVRBpnkeHZ/J3O3nTE/tRi+tyPXle+Q8BMYPB13N7ONObH5//DPlQkFunyiWFbZxfSNXZP0XyYOSM8tbD4Ih8LGJ2YBFjDCWgw0tSsDcXF+oHriD3NXwVCgi0NekJF1+8a85ZUuEhXcdJwsA+CHXsO2bL3uNm4GDmSGlRYj6NhoeQP2U2dWNzU3QVRwKZAla33+vqvMqdDihqe9yqtT5HjKBmOkFUA+90Y900yZR6h3HW72XM6oZxPdE2Dm2vWQGNMyImmJdEKn8w4TAwsfBkk84osM3rGAxMwxEelRMfIFRkE/afI7HikZ8ZvkA8z2HgJh1AGbvARTZyGaKPIVBFN2LWWjBx5apT8zLzoKDXfxtnrs/xFIqsxv/ksd9BnMpbdrdrgJMZGs+20wtrJdlYRvVgtAeJXqRsuy1Xw3z7bOTcJ2zvVDsd+gxc9fmqIkIVFHCOiW8dk433s292Rjpc+Cf4OkqK9B+XRKcyexCl7Tcg0igfWh4ywAHrEhavsciDMDXNnbZhAmBozthVHb9PvqQ3uyHj+x6yjgAwBr5241irPMd+XqELmS09BJ3vGgpwgSedK0Q28pdYiwEKLzEVAxtYFjGQb
UcEvAjm0IGQEZHAJxwjMqLqqEbiRNUUjkOOKBEYAx9Zt9UDPa/yFInZT3sUJeIk8CBE9cUSbHjQZdI3oddyLpph87+I+VWBn3evUqYjh9eUdoK6dy3Yve+t6uOlMoCRFJ9Y+oKaM2TfW2xfbbfSkpGiaYU7PhdAa8uGuyQHajPr0PMYcPUG810II9WkQ6W0drZBPHYCboupETOQBFl/6/xXH6hdCQQspx7CqnkHd921UjseRdhsc4/dHe/1BAWz3risIwr1pCDuvd28YAhaE5ZlfikVmxnRZ6XU77oK5ox1lah/ku3LADt0rJnGX/csACF2+1gtyrZzXPzzLrPPBIo5FDRy7/zxi37VjrG02rlxMp3roL7IvPGBbfGB4IPGWEJwSeThsg5x2T/+vG/V/3RQnMqSh7CFIYsQw3yNCdfh4TQ2XtGWEvt4XNaPQtvLT+l9GUUEekVZX9gVOsNMWJ7zgdSETN6ILVUW2lzEFAVlBqoMrrG4tlxzYESjqHbkEnWnH6rJO9wFJTD2ZKNGQlrZK5Vz4XsJ2YG3KJlgP/XXal0ce46Td+iWkfNunluSM+IwrmX0+zlZaVSfgTsjdkaKxENojuXI1zOmhmyoJ/U47BPAi59ssm/H1Iw47dP77qK3AtViZwoZVZNmE22ds+7wamBG7PfdoWJqbXPTycIw585VeEc+ESVpCeQ7Lkq6rMQQMbQmUsrTY7wJSJrBZR4wUeyPMXH95t+AeGQxGAT8VpKtK8IDAx1+nHxIq/qjnY7kXOJBMm4ClktTTsMzjjHS0QAkaegBK2dC/CUh3Qw9i0za0KjhFQw8k0za0StKeb/eIKL70G+6Urqi4pJ+HvfMdME1FQeiO/lC+8mX9/PQ5ja+nrSlPW7dUi/LHqxOcTG44aL1bXRKfsgY+/5yum/nC3CtiQY+BHt0ee89Azgsupqg3Zq+3HQR1VuzYySzNQ5khL0Y5joKUFdhpTA/Y4RFVpaRMgu7jnD8jcKz335K38eaWWaLBx2tSLJcVHXCvuWmH03v1ll69pUh9X72lH9Nb8j9g6Yrl6mDaKewm72ut46sj9+rIRen7XTpyd+B6zcPuihjPW9yiCjf+x0KmUNgD0qqZaRPu37umtyOoEae4YIA0gxwwyhkLjmiVMFwBNWug9BuuymJdU1MvngZrBP4fwOW88mpsEjikmrxfZr17aQLJboF3ysoPq0qpphBWKsw/d4Qv9Z8MTu3AZ1wWMTPbTSXiKVm+jQkqB1zAyUgqnJKYDGXpw51VdqXjIDo4a1HDRy7CY5us1q+O3fWmsxuamuFH4QQSfkN0+iUDBaxMZtNUYKi67NZgG3f0FMd9DoYPHTrZ9s5w+g0C8lJ0I/Qj87lF0neVY9NCVBecc2h382KFw6aUcwS8Ck9fmnzkMe7UpLsL3v6p/+bnD3/i8PrzWytJQktEjec7IK6b3JDEWrKCbYsgnfaR05RWogtusLG+0elTlLcpX2CO8nevF52kVvSmdP8q+0wevL6BY9NVVyYfIjxtLuER8j8a/LVDoK7AZ/JgP0S21mreJPQ/yYOENEu7EIoKyPjwEGlMCaepjUiJZKZpsll2vGNkLhsYkgEgIi9iyKc7CMXNI6xwKash7ijHMM0ZOUUFoeUWrEW7/zpNQyJygx7XkRDVMpmqinETtDWJrJ+558iG1I+JBxcAwmLhhuKG1RBkQtLooJ/a5X+L6l4ys6EhHDwg0ELKUGlIHVnNFkGPDjoUWHx/fH32sBf+BlRzc67rLAduFQxTxX/9Aj42FxLr/G8txB4ULOtJGQ0AVvOFLMLc+wEvFtr7DwRb0DPQonm9Dv5AXmhh6lN0qOHTdhUa0rx8U3wizfsEzTHq8pItMqkS3iqLtSY74CXOy+Rm9tjiZGA0mnFm3v+Qxm9TZWwAH0jHAYLFbUjJawA0P4q7z/0T27N0u0dnEu0L11BKeRvHXv4yAsS6T3JgKEH9qGL6vpY7LBg93oNyqpki9Lsn7LtO
kfyO6Fruz6SluzEHYQkzXrL7XvgqRykyeLCzcMKp2RPoqV9M1LDE/WZO3NFx+/d20D4mPkScqe9NQInmBqtqfHQCMoavjYzjiejY6N+cm3cSb97pzQyZVGLTI2WqyVQ255Cl2dN7vSYE3l58Xg5HwlSJfNdMD5LmcWgZEocXPTqjK5xVwOkMXY6vi5GiUyslNnY/E31k6aO5no1vXQ1QPqQE0jzLKiVkTpatFmw5WbSo83OyTFmn6WShWvWhk2WrNYReQPTZMpXCRgdLVs4HekkirJwsGnJQMHSEt5Tc2D+FnjWKC/caUpLO9oLSLMZ0YX/ZqC0hVboYF6mOsbr7lc0klg8uKPcLfAopwdci+OVuOP+K4JdQ0vk9GVsQr8Ms7nk79Vzi/5BnLM799jPpDtqw3VMTvo6X1/GSZrywjj7iR0MK2uuQeXnOr0PmbOK2IaO6eNciK0hVyd1RSjdvhHVl7bz0iz47ooFiaf7BdDR2kku6XnFcasKeSn5LrOnx5BRVe1flesJ0Dhjgf8MVAuyZcVQ7xAQb77RAH0W7rrLFyJpa8pTzFZIvk490Cbv7t5N/igP7wEuLIO4OH0WwzI6BFjHbBNiTAdLNdUXbHdITKgLLHD5esz8uc3J1GTdYXfJLReDaHBNFG48EC1fbwaLgWcfgFWV7KyWbbeln6mOrybiw7GtHOMyMtwECGYPFJQsvkot6CH1VIKpgy1CZt4hiUvoHQ6A+YJG/p9yyP0qEIsHadwLh9RPJ47sRaTglVe5rV1/Wuo5a4dqR5bwNUw7N7ZjL7N+i7bJ1UNoRjA4JQvNbv3rkLSHpqucclmYp37FP4LNT9BkV3DO8ArzPC6EFysVDQf8aCl+RPTnth1LUff31h1L12ASP+9JV+LM/rqrLVLS8pYixjqa+1HmMSyuvG2QFqR9wg8q8IISWuIG81wQ2ZT5UeT8tt0Bs20ehsl22v0qyXfKp6qpGfwGdNfGnKk5RW+ECvoDOo+ST2/lORtk87M9v+VH2C6l8csOPYmF1WjBS9rUTBYqN3EEvS052lHcSsxY1ZT9oOGTqfse8EjoCzv80Af5T7HQhbhiAYPgF6H+hIqmnTlvuRTJEeS5qtBgdhPjKAR8FJFhDzptZTCjm5idg4uX9aoKbHFvxnoFR0o73ETLwN0IBeoJ1W/UKdfx9Ddt2mfivRVtwk0v/MPRdIH+JBlyL+yQCdtVDxTWOPV1SAAydZ1cfO+zFH3EbBTNZQcj/+o+8+ZTK+Fp9DMHE/fBQylowvXABGSKboooUcmkXt7ebtC/D3IbXQkS1U9FlvkE2CkUluFBShxFL+pxPEC3wkYMblB0IPcGCAwZrBERyPuA32BiNJy45FaRuIccPuML8GbQdbQmzpQDISShfVCUBuzZhhlb0mUwJS3YrW68//r8AAAD//5ZWhcA=" } diff --git a/metricbeat/module/elasticsearch/index/_meta/data.json b/metricbeat/module/elasticsearch/index/_meta/data.json index cde504eefb7d..2fa8eeb4f18f 100644 --- a/metricbeat/module/elasticsearch/index/_meta/data.json +++ b/metricbeat/module/elasticsearch/index/_meta/data.json @@ -50,7 +50,8 @@ "query_time_in_millis": 2456214 }, "store": { - "size_in_bytes": 17759832 + "size_in_bytes": 17759832, + "total_data_set_size_in_bytes": 17759832 }, "query_cache": { "memory_size_in_bytes": 21120, @@ -89,7 +90,8 @@ "total_size_in_bytes": 2199683211 }, "store": { 
- "size_in_bytes": 17759832 + "size_in_bytes": 17759832, + "total_data_set_size_in_bytes": 17759832 }, "indexing": { "index_time_in_millis": 117187, @@ -152,4 +154,4 @@ "host": { "name": "host.example.com" } - } \ No newline at end of file + } diff --git a/metricbeat/module/elasticsearch/index/_meta/fields.yml b/metricbeat/module/elasticsearch/index/_meta/fields.yml index ac4be51c415f..8182963959b3 100644 --- a/metricbeat/module/elasticsearch/index/_meta/fields.yml +++ b/metricbeat/module/elasticsearch/index/_meta/fields.yml @@ -53,6 +53,8 @@ type: long - name: store.size_in_bytes type: long + - name: store.total_data_set_size_in_bytes + type: long - name: docs.count type: long - name: docs.deleted @@ -113,6 +115,11 @@ type: long description: > Total size of the index in bytes. + - name: store.total_data_set_size_in_bytes + format: bytes + type: long + description: > + Total size of the index in bytes including backing data for partially mounted indices. - name: query_cache type: group fields: diff --git a/metricbeat/module/elasticsearch/index/data.go b/metricbeat/module/elasticsearch/index/data.go index bcd4aeb3b6e7..620cddf93a00 100644 --- a/metricbeat/module/elasticsearch/index/data.go +++ b/metricbeat/module/elasticsearch/index/data.go @@ -73,7 +73,8 @@ type primaries struct { FixedBitSetMemoryInBytes int `json:"fixed_bit_set_memory_in_bytes"` } `json:"segments"` Store struct { - SizeInBytes int `json:"size_in_bytes"` + SizeInBytes int `json:"size_in_bytes"` + TotalDataSetSizeInBytes int `json:"total_data_set_size_in_bytes"` } `json:"store"` Refresh struct { TotalTimeInMillis int `json:"total_time_in_millis"` @@ -132,7 +133,8 @@ type total struct { FixedBitSetMemoryInBytes int `json:"fixed_bit_set_memory_in_bytes"` } `json:"segments"` Store struct { - SizeInBytes int `json:"size_in_bytes"` + SizeInBytes int `json:"size_in_bytes"` + TotalDataSetSizeInBytes int `json:"total_data_set_size_in_bytes"` } `json:"store"` Refresh struct { TotalTimeInMillis int 
`json:"total_time_in_millis"` diff --git a/metricbeat/module/elasticsearch/index_summary/_meta/fields.yml b/metricbeat/module/elasticsearch/index_summary/_meta/fields.yml index b0a8352e57fe..8ca0dcb11d53 100644 --- a/metricbeat/module/elasticsearch/index_summary/_meta/fields.yml +++ b/metricbeat/module/elasticsearch/index_summary/_meta/fields.yml @@ -20,6 +20,11 @@ format: bytes description: > Total size of the index in bytes. + - name: store.total_data_set_size.bytes + type: long + format: bytes + description: > + Total size of the index in bytes including backing data for partially mounted indices. - name: segments.count type: long description: > @@ -78,6 +83,11 @@ format: bytes description: > Total size of the index in bytes. + - name: store.total_data_set_size.bytes + type: long + format: bytes + description: > + Total size of the index in bytes including backing data for partially mounted indices. - name: segments.count type: long description: > diff --git a/metricbeat/module/elasticsearch/index_summary/data.go b/metricbeat/module/elasticsearch/index_summary/data.go index ff6fedb53c9a..abd1d02d01df 100644 --- a/metricbeat/module/elasticsearch/index_summary/data.go +++ b/metricbeat/module/elasticsearch/index_summary/data.go @@ -46,6 +46,9 @@ var indexSummaryDict = s.Schema{ "size": s.Object{ "bytes": c.Int("size_in_bytes"), }, + "total_data_set_size": s.Object{ + "bytes": c.Int("total_data_set_size_in_bytes", s.Optional), + }, }), "segments": c.Dict("segments", s.Schema{ "count": c.Int("count"), diff --git a/metricbeat/module/elasticsearch/node_stats/_meta/fields.yml b/metricbeat/module/elasticsearch/node_stats/_meta/fields.yml index ab7d2d06338e..1c5b1a1de684 100644 --- a/metricbeat/module/elasticsearch/node_stats/_meta/fields.yml +++ b/metricbeat/module/elasticsearch/node_stats/_meta/fields.yml @@ -66,7 +66,11 @@ - name: store.size.bytes type: long description: > - Total size of the store in bytes. + Total size of all shards assigned to this node in bytes. 
+ - name: store.total_data_set_size.bytes + type: long + description: > + Total size of shards in bytes assigned to this node including backing data for partially mounted indices. - name: fielddata type: group fields: diff --git a/metricbeat/module/elasticsearch/node_stats/data.go b/metricbeat/module/elasticsearch/node_stats/data.go index 1c0e00f9991a..c6d430875d0b 100644 --- a/metricbeat/module/elasticsearch/node_stats/data.go +++ b/metricbeat/module/elasticsearch/node_stats/data.go @@ -116,6 +116,9 @@ var ( "size": s.Object{ "bytes": c.Int("size_in_bytes"), }, + "total_data_set_size": s.Object{ + "bytes": c.Int("total_data_set_size_in_bytes", s.Optional), + }, }), "segments": c.Dict("segments", s.Schema{ "count": c.Int("count"), From 46a86b8252daef54c457f2f8aa999cf873d0e756 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 16 Jan 2024 17:00:00 -0500 Subject: [PATCH 053/129] chore: Update snapshot.yml (#37645) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index df64c5001258..350dd078eeef 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-07re5v7e-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-sq0d327c-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: 
docker.elastic.co/logstash/logstash:8.13.0-07re5v7e-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-sq0d327c-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-07re5v7e-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-sq0d327c-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From c34f3dbde57c10e54dabc489ec7003840db8f6c4 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Wed, 17 Jan 2024 14:37:03 +0100 Subject: [PATCH 054/129] Add missing generated files (make update) (#37658) This commit adds a missing file generated when running `make update` from the project's root directory. --- .../modules.d/enterprisesearch-xpack.yml.disabled | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled diff --git a/x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled b/x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled new file mode 100644 index 000000000000..0af7916573a0 --- /dev/null +++ b/x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled @@ -0,0 +1,11 @@ +# Module: enterprisesearch +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-enterprisesearch.html + +- module: enterprisesearch + xpack.enabled: true + metricsets: ["health", "stats"] + enabled: true + period: 10s + hosts: ["http://localhost:3002"] + #username: "user" + #password: "secret" From c5c9fab06531b4aa02d2299320ba7600d097c443 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 17 Jan 2024 18:54:08 +0100 Subject: [PATCH 055/129] docs: Prepare Changelog for 8.12.0 (#37655) (#37661) * docs: Close changelog for 8.12.0 * Update CHANGELOG.asciidoc * Update CHANGELOG.asciidoc * Apply suggestions from code review 
Co-authored-by: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com> --------- Co-authored-by: Pierre HILBERT Co-authored-by: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com> (cherry picked from commit 9ecb4273e58f62902661b3979f1d936eac706240) Co-authored-by: Elastic Machine --- CHANGELOG.asciidoc | 86 +++++++++++++++++++++++++++++++++++ CHANGELOG.next.asciidoc | 56 ++--------------------- libbeat/docs/release.asciidoc | 1 + 3 files changed, 91 insertions(+), 52 deletions(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 0a9456d31ec7..3305b1989b69 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -3,6 +3,92 @@ :issue: https://github.com/elastic/beats/issues/ :pull: https://github.com/elastic/beats/pull/ +[[release-notes-8.12.0]] +=== Beats version 8.12.0 +https://github.com/elastic/beats/compare/v8.11.4\...v8.12.0[View commits] + +==== Breaking changes + +*Heartbeat* +- Decrease the ES default timeout to 10 for the load monitor state requests. + +*Osquerybeat* + +- Upgrade to osquery 5.10.2. {pull}37115[37115] + +==== Bugfixes + +*Filebeat* + +- Add validation to the `http_endpoint` config for empty URL. {pull}36816[36816] {issue}36772[36772] +- Fix merging of array fields (processors, paths, parsers) in configurations generated from hints and default config. {issue}36838[36838] {pull}36857[36857] + +==== Added + +*Affecting all Beats* + +- Allow `queue` configuration settings to be set under the output. {issue}35615[35615] {pull}36788[36788] +- Raise up logging level to warning when attempting to configure {beats} with unknown fields from autodiscovered events/environments. +- Elasticsearch output now supports `idle_connection_timeout`. {issue}35616[35615] {pull}36843[36843] +- Upgrade to Go 1.20.12. {pull}37350[37350] +- The Elasticsearch output can now configure performance presets with the `preset` configuration field. {pull}37259[37259] +- Upgrade `elastic-agent-system-metrics` to v0.9.1. 
See https://github.com/elastic/elastic-agent-system-metrics/releases/tag/v0.9.1. {pull}37353[37353] +- Upgrade to elastic-agent-libs v0.7.3 and golang.org/x/crypto v0.17.0. {pull}37544[37544] + +*Auditbeat* + +- Add `ignore_errors` option to audit module. {issue}15768[15768] {pull}36851[36851] +- Fix copy arguments for strict aligned architectures. {pull}36976[36976] + +*Filebeat* + +- Allow http_endpoint input to receive PUT and PATCH requests. {pull}36734[36734] +- Avoid unwanted publication of Azure entity records. {pull}36753[36753] +- Avoid unwanted publication of Okta entity records. {pull}36770[36770] +- Add support for Digest Authentication to CEL input. {issue}35514[35514] {pull}36932[36932] +- Use filestream input with `file_identity.fingerprint` as default for hints autodiscover. {issue}35984[35984] {pull}36950[36950] +- Add network processor in addition to interface based direction resolution. {pull}37023[37023] +- Make CEL input log current transaction ID when request tracing is turned on. {pull}37065[37065] +- Make Azure Blob Storage input GA and update docs accordingly. {pull}37128[37128] +- Add request trace logging to http_endpoint input. {issue}36951[36951] {pull}36957[36957] +- Make GCS input GA and update docs accordingly. {pull}37127[37127] +- Suppress and log max HTTP request retry errors in CEL input. {pull}37160[37160] +- Prevent CEL input from re-entering the eval loop when an evaluation failed. {pull}37161[37161] +- Update CEL extensions library to v1.7.0. {pull}37172[37172] + +*Auditbeat* + +- Upgrade go-libaudit to v2.4.0. {issue}36776[36776] {pull}36964[36964] +- Add a `/inputs/` route to the HTTP monitoring endpoint that exposes metrics for each dataset instance. {pull}36971[36971] + +*Heartbeat* +- Capture and log the individual connection metrics for all the lightweight monitors. + +*Metricbeat* + +- Add metrics grouping by dimensions and time to Azure app insights. 
{pull}36634[36634] +- Align on the algorithm used to transform Prometheus histograms into Elasticsearch histograms. {pull}36647[36647] +- Enhance GCP billing with detailed tables identification, additional fields, and optimized data handling. {pull}36902[36902] +- Add a `/inputs/` route to the HTTP monitoring endpoint that exposes metrics for each metricset instance. {pull}36971[36971] +- Add Linux IO metrics to system/process. {pull}37213[37213] +- Add new memory/cgroup metrics to Kibana module. {pull}37232[37232] + +*Packetbeat* + +- Add metrics for TCP flags. {issue}36992[36992] {pull}36975[36975] + +*Winlogbeat* + +- Make ingest pipeline routing robust to letter case of channel names for forwarded events. {issue}36670[36670] {pull}36899[36899] +- Document minimum permissions required for local user account. {issue}15773[15773] {pull}37176[37176] + +==== Deprecated + +*Filebeat* + +- Deprecate rsa2elk Filebeat modules. {issue}36125[36125] {pull}36887[36887] + + [[release-notes-8.11.4]] === Beats version 8.11.4 https://github.com/elastic/beats/compare/v8.11.3\...v8.11.4[View commits] diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index b16313aaadb4..be590958c3d0 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -17,14 +17,12 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Heartbeat* -- Decreases the ES default timeout to 10 for the load monitor state requests *Metricbeat* *Osquerybeat* -- Upgrade to osquery 5.10.2. {pull}37115[37115] *Packetbeat* @@ -73,18 +71,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix panic when sqs input metrics getter is invoked {pull}36101[36101] {issue}36077[36077] - Fix handling of Juniper SRX structured data when there is no leading junos element. 
{issue}36270[36270] {pull}36308[36308] - Fix Filebeat Cisco module with missing escape character {issue}36325[36325] {pull}36326[36326] -- Fix panic when redact option is not provided to CEL input. {issue}36387[36387] {pull}36388[36388] -- Remove 'onFilteredOut' and 'onDroppedOnPublish' callback logs {issue}36299[36299] {pull}36399[36399] - Added a fix for Crowdstrike pipeline handling process arrays {pull}36496[36496] -- Ensure winlog input retains metric collection when handling recoverable errors. {issue}36479[36479] {pull}36483[36483] -- Revert error introduced in {pull}35734[35734] when symlinks can't be resolved in filestream. {pull}36557[36557] -- Fix ignoring external input configuration in `take_over: true` mode {issue}36378[36378] {pull}36395[36395] -- Add validation to http_endpoint config for empty URL {pull}36816[36816] {issue}36772[36772] -- Fix merging of array fields(processors, paths, parsers) in configurations generated from hints and default config. {issue}36838[36838] {pull}36857[36857] -- Fix handling of response errors in HTTPJSON and CEL request trace logging. {pull}36956[36956] -- Do not error when Okta API returns no data. {pull}37092[37092] -- Fix request body close behaviour in HTTP_Endpoint when handling GZIP compressed content. {pull}37091[37091] -- Make CEL input now global evaluate to a time in UTC. {pull}37159[37159] *Heartbeat* @@ -104,6 +91,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix EC2 host.cpu.usage {pull}35717[35717] - Add option in SQL module to execute queries for all dbs. {pull}35688[35688] - Add remaining dimensions for azure storage account to make them available for tsdb enablement. 
{pull}36331[36331] +- Add log error when statsd server fails to start {pull}36477[36477] - Fix Azure Resource Metrics missing metrics (min and max aggregations) after upgrade to 8.11.3 {issue}37642[37642] {pull}37643[37643] *Osquerybeat* @@ -124,12 +112,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Affecting all Beats* - Added append Processor which will append concrete values or values from a field to target. {issue}29934[29934] {pull}33364[33364] -- When running under Elastic-Agent the status is now reported per Unit instead of the whole Beat {issue}35874[35874] {pull}36183[36183] -- Add warning message to SysV init scripts for RPM-based systems that lack `/etc/rc.d/init.d/functions`. {issue}35708[35708] {pull}36188[36188] -- Mark `translate_sid` processor is GA. {issue}36279[36279] {pull}36280[36280] - dns processor: Add support for forward lookups (`A`, `AAAA`, and `TXT`). {issue}11416[11416] {pull}36394[36394] -- Mark `syslog` processor as GA, improve docs about how processor handles syslog messages. {issue}36416[36416] {pull}36417[36417] -- Add support for AWS external IDs. {issue}36321[36321] {pull}36322[36322] - [Enhanncement for host.ip and host.mac] Disabling netinfo.enabled option of add-host-metadata processor {pull}36506[36506] Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will disable the netinfo.enabled option of add_host_metadata processor - allow `queue` configuration settings to be set under the output. {issue}35615[35615] {pull}36788[36788] @@ -145,8 +128,6 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d *Auditbeat* -- Add `ignore_errors` option to audit module. {issue}15768[15768] {pull}36851[36851] -- Fix copy arguments for strict aligned architectures. 
{pull}36976[36976] *Filebeat* @@ -163,33 +144,9 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - Add nginx ingress_controller parsing if one of upstreams fails to return response {pull}34787[34787] - Add oracle authentication messages parsing {pull}35127[35127] - Add `clean_session` configuration setting for MQTT input. {pull}35806[16204] -- Add fingerprint mode for the filestream scanner and new file identity based on it {issue}34419[34419] {pull}35734[35734] -- Add file system metadata to events ingested via filestream {issue}35801[35801] {pull}36065[36065] -- Add support for localstack based input integration testing {pull}35727[35727] -- Allow parsing bytes in and bytes out as long integer in CEF processor. {issue}36100[36100] {pull}36108[36108] -- Add support for registered owners and users to AzureAD entity analytics provider. {pull}36092[36092] -- Add support for endpoint resolver in AWS config {pull}36208[36208] -- Added support for Okta OAuth2 provider in the httpjson input. {pull}36273[36273] -- Add support of the interval parameter in Salesforce setupaudittrail-rest fileset. {issue}35917[35917] {pull}35938[35938] -- Add device handling to Okta input package for entity analytics. {pull}36049[36049] -- Add setup option `--force-enable-module-filesets`, that will act as if all filesets have been enabled in a module during setup. {issue}30916[30916] {pull}36286[36286] -- [Azure] Add input metrics to the azure-eventhub input. {pull}35739[35739] -- Reduce HTTPJSON metrics allocations. {pull}36282[36282] - Add support for a simplified input configuraton when running under Elastic-Agent {pull}36390[36390] -- Make HTTPJSON response body decoding errors more informative. {pull}36481[36481] -- Allow fine-grained control of entity analytics API requests for Okta provider. 
{issue}36440[36440] {pull}36492[36492] -- Add support for expanding `journald.process.capabilities` into the human-readable effective capabilities in the ECS `process.thread.capabilities.effective` field. {issue}36454[36454] {pull}36470[36470] -- Allow fine-grained control of entity analytics API requests for AzureAD provider. {issue}36440[36440] {pull}36441[36441] -- For request tracer logging in CEL and httpjson the request and response body are no longer included in `event.original`. The body is still present in `http.{request,response}.body.content`. {pull}36531[36531] - Added support for Okta OAuth2 provider in the CEL input. {issue}36336[36336] {pull}36521[36521] -- Improve error logging in HTTPJSON input. {pull}36529[36529] -- Disable warning message about ingest pipeline loading when running under Elastic Agent. {pull}36659[36659] -- Add input metrics to http_endpoint input. {issue}36402[36402] {pull}36427[36427] -- Remove Event Normalization from GCP PubSub Input. {pull}36716[36716] -- Update mito CEL extension library to v1.6.0. {pull}36651[36651] - Added support for new features & removed partial save mechanism in the Azure Blob Storage input. {issue}35126[35126] {pull}36690[36690] -- Improve template evaluation logging for HTTPJSON input. {pull}36668[36668] -- Add CEL partial value debug function. {pull}36652[36652] - Added support for new features and removed partial save mechanism in the GCS input. {issue}35847[35847] {pull}36713[36713] - Re-use buffers to optimise memory allocation in fingerprint mode of filestream {pull}36736[36736] - Allow http_endpoint input to receive PUT and PATCH requests. {pull}36734[36734] @@ -211,14 +168,11 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d *Auditbeat* -- Upgrade go-libaudit to v2.4.0. {issue}36776[36776] {pull}36964[36964] -- Add a `/inputs/` route to the HTTP monitoring endpoint that exposes metrics for each dataset instance. 
{pull}36971[36971] *Libbeat* *Heartbeat* - Added status to monitor run log report. -- Capture and log the individual connection metrics for all the lightweight monitors *Metricbeat* @@ -240,16 +194,12 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d *Packetbeat* -- Add metrics for TCP flags. {issue}36992[36992] {pull}36975[36975] *Packetbeat* *Winlogbeat* -- Make ingest pipeline routing robust to letter case of channel names for forwarded events. {issue}36670[36670] {pull}36899[36899] -- Document minimum permissions required for local user account. {issue}15773[15773] {pull}37176[37176] -- Bump Windows Npcap version to v1.78. {issue}37300[37300] {pull}37370[37370] *Functionbeat* @@ -269,7 +219,6 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d *Filebeat* -- Deprecate rsa2elk Filebeat modules. {issue}36125[36125] {pull}36887[36887] *Heartbeat* @@ -329,6 +278,9 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d + + + diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index 8034a904ff31..47a6f1eaf23f 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -8,6 +8,7 @@ This section summarizes the changes in each release. Also read <> for more detail about changes that affect upgrade. +* <> * <> * <> * <> From eb53e2f7992b2ce648aaae92b7138523536c343f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Jan 2024 18:56:32 +0000 Subject: [PATCH 056/129] build(deps): bump jinja2 from 2.11.3 to 3.1.3 in /libbeat/tests/system (#37652) Bumps [jinja2](https://github.com/pallets/jinja) from 2.11.3 to 3.1.3. 
- [Release notes](https://github.com/pallets/jinja/releases) - [Changelog](https://github.com/pallets/jinja/blob/main/CHANGES.rst) - [Commits](https://github.com/pallets/jinja/compare/2.11.3...3.1.3) --- updated-dependencies: - dependency-name: jinja2 dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- libbeat/tests/system/requirements.txt | 31 +++++++++++++++--- libbeat/tests/system/requirements_aix.txt | 39 ++++++++++++++++++----- 2 files changed, 57 insertions(+), 13 deletions(-) diff --git a/libbeat/tests/system/requirements.txt b/libbeat/tests/system/requirements.txt index 11442585fdc9..a9df181606d3 100644 --- a/libbeat/tests/system/requirements.txt +++ b/libbeat/tests/system/requirements.txt @@ -1,10 +1,18 @@ +async-timeout==4.0.3 attrs==19.3.0 autopep8==1.5.4 +backoff==2.2.1 backports.ssl-match-hostname==3.5.0.1 +bcrypt==4.1.2 cached-property==1.4.2 certifi==2023.7.22 +cffi==1.16.0 chardet==3.0.4 +charset-normalizer==3.3.2 +cryptography==41.0.7 deepdiff==4.2.0 +Deprecated==1.2.14 +distro==1.9.0 docker==6.0.1 docker-compose==1.29.2 docker-pycreds==0.4.0 @@ -12,42 +20,55 @@ dockerpty==0.4.1 docopt==0.6.2 elasticsearch==7.8.1 enum34==1.1.6 +exceptiongroup==1.2.0 +googleapis-common-protos==1.56.4 +grpcio==1.60.0 idna==2.6 importlib-metadata==1.7.0 iniconfig==1.0.1 ipaddress==1.0.19 -Jinja2==2.11.3 +Jinja2==3.1.3 jsondiff==1.1.2 jsonschema==3.2.0 kafka-python==1.4.3 -MarkupSafe==1.1.1 +MarkupSafe==2.1.3 more-itertools==8.4.0 opentelemetry-api==1.13.0 opentelemetry-exporter-otlp==1.13.0 +opentelemetry-exporter-otlp-proto-grpc==1.13.0 +opentelemetry-exporter-otlp-proto-http==1.13.0 +opentelemetry-proto==1.13.0 opentelemetry-sdk==1.13.0 +opentelemetry-semantic-conventions==0.34b0 ordered-set==3.1.1 packaging==20.4 parameterized==0.7.0 +paramiko==3.4.0 pluggy==0.13.1 +protobuf==3.19.4 py==1.11.0 pycodestyle==2.6.0 +pycparser==2.21 +PyNaCl==1.5.0 pyparsing==2.4.7 
pyrsistent==0.16.0 pytest==7.3.2 pytest-rerunfailures==9.1.1 pytest-timeout==1.4.2 +python-dotenv==0.21.1 PyYAML==5.3.1 redis==4.4.4 requests==2.31.0 semver==2.8.1 -setuptools==65.5.1 six==1.15.0 stomp.py==4.1.22 termcolor==1.1.0 texttable==0.9.1 toml==0.10.1 +tomli==2.0.1 +typing_extensions==4.9.0 urllib3==1.26.18 wcwidth==0.2.5 websocket-client==0.47.0 -zipp>=1.2.0,<=3.1.0 -protobuf==3.19.4 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +wrapt==1.16.0 +zipp==3.1.0 diff --git a/libbeat/tests/system/requirements_aix.txt b/libbeat/tests/system/requirements_aix.txt index a0b2b0025887..a9df181606d3 100644 --- a/libbeat/tests/system/requirements_aix.txt +++ b/libbeat/tests/system/requirements_aix.txt @@ -1,51 +1,74 @@ +async-timeout==4.0.3 attrs==19.3.0 autopep8==1.5.4 +backoff==2.2.1 backports.ssl-match-hostname==3.5.0.1 +bcrypt==4.1.2 cached-property==1.4.2 certifi==2023.7.22 +cffi==1.16.0 chardet==3.0.4 +charset-normalizer==3.3.2 +cryptography==41.0.7 deepdiff==4.2.0 -docker==4.1.0 +Deprecated==1.2.14 +distro==1.9.0 +docker==6.0.1 +docker-compose==1.29.2 docker-pycreds==0.4.0 dockerpty==0.4.1 docopt==0.6.2 elasticsearch==7.8.1 enum34==1.1.6 +exceptiongroup==1.2.0 +googleapis-common-protos==1.56.4 +grpcio==1.60.0 idna==2.6 importlib-metadata==1.7.0 iniconfig==1.0.1 ipaddress==1.0.19 -Jinja2==2.11.3 +Jinja2==3.1.3 jsondiff==1.1.2 jsonschema==3.2.0 kafka-python==1.4.3 -MarkupSafe==1.1.1 +MarkupSafe==2.1.3 more-itertools==8.4.0 -opentelemetry-api==1.11.0 -opentelemetry-exporter-otlp==1.11.0 -opentelemetry-sdk==1.11.0 +opentelemetry-api==1.13.0 +opentelemetry-exporter-otlp==1.13.0 +opentelemetry-exporter-otlp-proto-grpc==1.13.0 +opentelemetry-exporter-otlp-proto-http==1.13.0 +opentelemetry-proto==1.13.0 +opentelemetry-sdk==1.13.0 +opentelemetry-semantic-conventions==0.34b0 ordered-set==3.1.1 packaging==20.4 parameterized==0.7.0 +paramiko==3.4.0 pluggy==0.13.1 +protobuf==3.19.4 py==1.11.0 pycodestyle==2.6.0 
+pycparser==2.21 +PyNaCl==1.5.0 pyparsing==2.4.7 pyrsistent==0.16.0 pytest==7.3.2 pytest-rerunfailures==9.1.1 pytest-timeout==1.4.2 +python-dotenv==0.21.1 PyYAML==5.3.1 redis==4.4.4 requests==2.31.0 semver==2.8.1 -setuptools==65.5.1 six==1.15.0 stomp.py==4.1.22 termcolor==1.1.0 texttable==0.9.1 toml==0.10.1 +tomli==2.0.1 +typing_extensions==4.9.0 urllib3==1.26.18 wcwidth==0.2.5 websocket-client==0.47.0 -zipp>=1.2.0,<=3.1.0 +wrapt==1.16.0 +zipp==3.1.0 From c9adcff7214eaede57d2632f95abae3ec17579a5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Jan 2024 14:51:49 -0500 Subject: [PATCH 057/129] build(deps): bump go.elastic.co/apm/module/apmhttp/v2 from 2.4.7 to 2.4.8 (#37656) * build(deps): bump go.elastic.co/apm/module/apmhttp/v2 Bumps [go.elastic.co/apm/module/apmhttp/v2](https://github.com/elastic/apm-agent-go) from 2.4.7 to 2.4.8. - [Release notes](https://github.com/elastic/apm-agent-go/releases) - [Changelog](https://github.com/elastic/apm-agent-go/blob/main/CHANGELOG.asciidoc) - [Commits](https://github.com/elastic/apm-agent-go/compare/v2.4.7...v2.4.8) --- updated-dependencies: - dependency-name: go.elastic.co/apm/module/apmhttp/v2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update NOTICE.txt --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- NOTICE.txt | 8 ++++---- go.mod | 4 ++-- go.sum | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 207dc8035f33..ed44e6d88c6e 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -23479,11 +23479,11 @@ Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmelasti -------------------------------------------------------------------------------- Dependency : go.elastic.co/apm/module/apmhttp/v2 -Version: v2.4.7 +Version: v2.4.8 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmhttp/v2@v2.4.7/LICENSE: +Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmhttp/v2@v2.4.8/LICENSE: Apache License Version 2.0, January 2004 @@ -23690,11 +23690,11 @@ Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmhttp/v -------------------------------------------------------------------------------- Dependency : go.elastic.co/apm/v2 -Version: v2.4.7 +Version: v2.4.8 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/v2@v2.4.7/LICENSE: +Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/v2@v2.4.8/LICENSE: Apache License Version 2.0, January 2004 diff --git a/go.mod b/go.mod index 3f326b006a76..635b9605ef74 100644 --- a/go.mod +++ b/go.mod @@ -220,8 +220,8 @@ require ( github.com/sergi/go-diff v1.3.1 github.com/shirou/gopsutil/v3 v3.22.10 go.elastic.co/apm/module/apmelasticsearch/v2 v2.4.7 - go.elastic.co/apm/module/apmhttp/v2 v2.4.7 - go.elastic.co/apm/v2 v2.4.7 + 
go.elastic.co/apm/module/apmhttp/v2 v2.4.8 + go.elastic.co/apm/v2 v2.4.8 go.mongodb.org/mongo-driver v1.5.1 golang.org/x/tools/go/vcs v0.1.0-deprecated google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb diff --git a/go.sum b/go.sum index 3c6faaf866ad..7bbbdb029366 100644 --- a/go.sum +++ b/go.sum @@ -1922,10 +1922,10 @@ github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.elastic.co/apm/module/apmelasticsearch/v2 v2.4.7 h1:Gfp+wxPotE1s5X5ufn3W28zYaSunKEWE/x9Xo+5WzoI= go.elastic.co/apm/module/apmelasticsearch/v2 v2.4.7/go.mod h1:taxw6rHuZtTF0p0DAv1xAg0fkvGprVIJu92JvbcgifU= -go.elastic.co/apm/module/apmhttp/v2 v2.4.7 h1:IL+DRK8ODO791ai/l1g/8dk6E3VPomiV0jbRIxBWbqM= -go.elastic.co/apm/module/apmhttp/v2 v2.4.7/go.mod h1:Itj4PGNVO33Tpp/9UPf4A6pCezTmADRRi/ytLAGms24= -go.elastic.co/apm/v2 v2.4.7 h1:m5B2m59KgbiupuzFUkKqEvwHABIZxl2Ob0tCgc0XG9w= -go.elastic.co/apm/v2 v2.4.7/go.mod h1:+CiBUdrrAGnGCL9TNx7tQz3BrfYV23L8Ljvotoc87so= +go.elastic.co/apm/module/apmhttp/v2 v2.4.8 h1:C1piLq4wcFHFB7jpFW1WPEyEl6zbRpf9SAY8S8tX7Qk= +go.elastic.co/apm/module/apmhttp/v2 v2.4.8/go.mod h1:FzO5ptAs5oKB3xE1/hpxMA0a7mLIycp2RZdBtp4+uTA= +go.elastic.co/apm/v2 v2.4.8 h1:3RuqxDjhgumT1BYxJGnVPdGY12lCzF+KGyvtSTynCYo= +go.elastic.co/apm/v2 v2.4.8/go.mod h1:+CiBUdrrAGnGCL9TNx7tQz3BrfYV23L8Ljvotoc87so= go.elastic.co/ecszap v1.0.2 h1:iW5OGx8IiokiUzx/shD4AJCPFMC9uUtr7ycaiEIU++I= go.elastic.co/ecszap v1.0.2/go.mod h1:dJkSlK3BTiwG/qXhCwe50Mz/jwu854vSip8sIeQhNZg= go.elastic.co/fastjson v1.1.0 h1:3MrGBWWVIxe/xvsbpghtkFoPciPhOCmjsR/HfwEeQR4= From 6b9e7e4ca1ee6aca6acbe9dc203ec6299085305a Mon Sep 17 00:00:00 2001 From: Craig MacKenzie Date: Wed, 17 Jan 2024 15:43:04 -0500 Subject: [PATCH 058/129] Move Go 1.21 update to breaking changes. (#37663) This update obsoletes support for Windows 8.1. 
--- CHANGELOG.next.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index be590958c3d0..862231e015e3 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -10,6 +10,8 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Affecting all Beats* +- Upgrade to Go 1.21.6. Removes support for Windows 8.1. See https://tip.golang.org/doc/go1.21#windows. {pull}37615[37615] + *Auditbeat* @@ -121,7 +123,6 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - elasticsearch output now supports `idle_connection_timeout`. {issue}35616[35615] {pull}36843[36843] - Upgrade golang/x/net to v0.17.0. Updates the publicsuffix table used by the registered_domain processor. {pull}36969[36969] Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will disable the netinfo.enabled option of add_host_metadata processor -- Upgrade to Go 1.21.6. {pull}37615[37615] - The Elasticsearch output can now configure performance presets with the `preset` configuration field. {pull}37259[37259] - Upgrade to elastic-agent-libs v0.7.3 and golang.org/x/crypto v0.17.0. {pull}37544[37544] - Make more selective the Pod autodiscovery upon node and namespace update events. {issue}37338[37338] {pull}37431[37431] From 13ec0b30f5502720ff5079853e6f437478348ec7 Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Thu, 18 Jan 2024 16:27:59 +1030 Subject: [PATCH 059/129] x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph: add support for user-defined query selection (#37653) The $select optional queries were previously hardcoded. Make these accessible via a configuration group at the root level. 
--- CHANGELOG.next.asciidoc | 1 + .../inputs/input-entity-analytics.asciidoc | 21 ++++++ .../provider/azuread/fetcher/graph/graph.go | 23 +++++-- .../azuread/fetcher/graph/graph_test.go | 65 ++++++++++++------- 4 files changed, 83 insertions(+), 27 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 862231e015e3..c6b4b4e6e4f3 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -166,6 +166,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - Prevent CEL input from re-entering the eval loop when an evaluation failed. {pull}37161[37161] - Update CEL extensions library to v1.7.0. {pull}37172[37172] - Add support for complete URL replacement in HTTPJSON chain steps. {pull}37486[37486] +- Add support for user-defined query selection in EntraID entity analytics provider. {pull}37653[37653] *Auditbeat* diff --git a/x-pack/filebeat/docs/inputs/input-entity-analytics.asciidoc b/x-pack/filebeat/docs/inputs/input-entity-analytics.asciidoc index bb86de8ebcc5..86143f727bc5 100644 --- a/x-pack/filebeat/docs/inputs/input-entity-analytics.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-entity-analytics.asciidoc @@ -314,6 +314,27 @@ so. Altering this value will also require a change to `login_scopes`. Override the default authentication scopes. Only change if directed to do so. +[float] +===== `select.users` + +Override the default https://learn.microsoft.com/en-us/graph/api/user-get?view=graph-rest-1.0&tabs=http#optional-query-parameters[user query selections]. +This is a list of optional query parameters. The default is `["accountEnabled", "userPrincipalName", +"mail", "displayName", "givenName", "surname", "jobTitle", "officeLocation", "mobilePhone", +"businessPhones"]`. + +[float] +===== `select.groups` + +Override the default https://learn.microsoft.com/en-us/graph/api/user-get?view=graph-rest-1.0&tabs=http#optional-query-parameters[group query selections]. 
+This is a list of optional query parameters. The default is `["displayName", "members"]`. + +[float] +===== `select.devices` + +Override the default https://learn.microsoft.com/en-us/graph/api/user-get?view=graph-rest-1.0&tabs=http#optional-query-parameters[device query selections]. +This is a list of optional query parameters. The default is `["accountEnabled", "deviceId", +"displayName", "operatingSystem", "operatingSystemVersion", "physicalIds", "extensionAttributes", +"alternativeSecurityIds"]`. [id="provider-okta"] ==== Okta User Identities (`okta`) diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph.go index 44754e10fa60..6cabdf887e8f 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph.go @@ -15,6 +15,7 @@ import ( "io" "net/http" "net/url" + "strings" "github.com/google/uuid" @@ -98,11 +99,18 @@ type removed struct { // conf contains parameters needed to configure the fetcher. type graphConf struct { - APIEndpoint string `config:"api_endpoint"` + APIEndpoint string `config:"api_endpoint"` + Select selection `config:"select"` Transport httpcommon.HTTPTransportSettings `config:",inline"` } +type selection struct { + UserQuery []string `config:"users"` + GroupQuery []string `config:"groups"` + DeviceQuery []string `config:"devices"` +} + // graph implements the fetcher.Fetcher interface. 
type graph struct { conf graphConf @@ -345,21 +353,21 @@ func New(cfg *config.C, logger *logp.Logger, auth authenticator.Authenticator) ( if err != nil { return nil, fmt.Errorf("invalid groups URL endpoint: %w", err) } - groupsURL.RawQuery = url.QueryEscape(defaultGroupsQuery) + groupsURL.RawQuery = url.QueryEscape(formatQuery(c.Select.GroupQuery, defaultGroupsQuery)) f.groupsURL = groupsURL.String() usersURL, err := url.Parse(f.conf.APIEndpoint + "/users/delta") if err != nil { return nil, fmt.Errorf("invalid users URL endpoint: %w", err) } - usersURL.RawQuery = url.QueryEscape(defaultUsersQuery) + usersURL.RawQuery = url.QueryEscape(formatQuery(c.Select.UserQuery, defaultUsersQuery)) f.usersURL = usersURL.String() devicesURL, err := url.Parse(f.conf.APIEndpoint + "/devices/delta") if err != nil { return nil, fmt.Errorf("invalid devices URL endpoint: %w", err) } - devicesURL.RawQuery = url.QueryEscape(defaultDevicesQuery) + devicesURL.RawQuery = url.QueryEscape(formatQuery(c.Select.DeviceQuery, defaultDevicesQuery)) f.devicesURL = devicesURL.String() // The API takes a departure from the query approach here, so we @@ -374,6 +382,13 @@ func New(cfg *config.C, logger *logp.Logger, auth authenticator.Authenticator) ( return &f, nil } +func formatQuery(query []string, dflt string) string { + if len(query) == 0 { + return dflt + } + return "$select=" + strings.Join(query, ",") +} + // newUserFromAPI translates an API-representation of a user to a fetcher.User. 
func newUserFromAPI(u userAPI) (*fetcher.User, error) { var newUser fetcher.User diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph_test.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph_test.go index e54a05a2bd5b..f439cc916797 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph_test.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph_test.go @@ -12,6 +12,7 @@ import ( "net/http/httptest" "path" "reflect" + "strings" "testing" "time" @@ -457,28 +458,46 @@ func TestGraph_Devices(t *testing.T) { }, } - rawConf := graphConf{ - APIEndpoint: "http://" + testSrv.addr, - } - c, err := config.NewConfigFrom(&rawConf) - require.NoError(t, err) - auth := mock.New(mock.DefaultTokenValue) - - f, err := New(c, logp.L(), auth) - require.NoError(t, err) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - gotDevices, gotDeltaLink, gotErr := f.Devices(ctx, "") - - require.NoError(t, gotErr) - // Using go-cmp because testify is too weak for this comparison. - // reflect.DeepEqual works, but won't show a reasonable diff. 
- exporter := cmp.Exporter(func(t reflect.Type) bool { - return t == reflect.TypeOf(collections.UUIDSet{}) - }) - if !cmp.Equal(wantDevices, gotDevices, exporter) { - t.Errorf("unexpected result:\n--- got\n--- want\n%s", cmp.Diff(wantDevices, gotDevices, exporter)) + for _, test := range []struct { + name string + selection selection + }{ + {name: "default_selection"}, + { + name: "user_selection", + selection: selection{ + UserQuery: strings.Split(strings.TrimPrefix(defaultUsersQuery, "$select="), ","), + GroupQuery: strings.Split(strings.TrimPrefix(defaultGroupsQuery, "$select="), ","), + DeviceQuery: strings.Split(strings.TrimPrefix(defaultDevicesQuery, "$select="), ","), + }, + }, + } { + t.Run(test.name, func(t *testing.T) { + rawConf := graphConf{ + APIEndpoint: "http://" + testSrv.addr, + Select: test.selection, + } + c, err := config.NewConfigFrom(&rawConf) + require.NoError(t, err) + auth := mock.New(mock.DefaultTokenValue) + + f, err := New(c, logp.L(), auth) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + gotDevices, gotDeltaLink, gotErr := f.Devices(ctx, "") + + require.NoError(t, gotErr) + // Using go-cmp because testify is too weak for this comparison. + // reflect.DeepEqual works, but won't show a reasonable diff. 
+ exporter := cmp.Exporter(func(t reflect.Type) bool { + return t == reflect.TypeOf(collections.UUIDSet{}) + }) + if !cmp.Equal(wantDevices, gotDevices, exporter) { + t.Errorf("unexpected result:\n--- got\n--- want\n%s", cmp.Diff(wantDevices, gotDevices, exporter)) + } + require.Equal(t, wantDeltaLink, gotDeltaLink) + }) } - require.Equal(t, wantDeltaLink, gotDeltaLink) } From afb25f51d7c3d52c800be33bc96071db1d951b53 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 18 Jan 2024 12:38:50 -0500 Subject: [PATCH 060/129] chore: Update snapshot.yml (#37672) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 350dd078eeef..f13e4d18bb36 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-sq0d327c-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-knq1zsti-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-sq0d327c-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-knq1zsti-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-sq0d327c-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-knq1zsti-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - 
"ELASTICSEARCH_PASSWORD=testing" From b9e706cf5d6fc62cb8d4ae4ff4ad36e9f572fbff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 Jan 2024 13:29:25 -0500 Subject: [PATCH 061/129] build(deps): bump go.elastic.co/apm/module/apmelasticsearch/v2 from 2.4.7 to 2.4.8 (#37664) * build(deps): bump go.elastic.co/apm/module/apmelasticsearch/v2 Bumps [go.elastic.co/apm/module/apmelasticsearch/v2](https://github.com/elastic/apm-agent-go) from 2.4.7 to 2.4.8. - [Release notes](https://github.com/elastic/apm-agent-go/releases) - [Changelog](https://github.com/elastic/apm-agent-go/blob/main/CHANGELOG.asciidoc) - [Commits](https://github.com/elastic/apm-agent-go/compare/v2.4.7...v2.4.8) --- updated-dependencies: - dependency-name: go.elastic.co/apm/module/apmelasticsearch/v2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update NOTICE.txt --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- NOTICE.txt | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index ed44e6d88c6e..c7e77209c2c2 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -23268,11 +23268,11 @@ Contents of probable licence file $GOMODCACHE/github.com/xdg/scram@v1.0.3/LICENS -------------------------------------------------------------------------------- Dependency : go.elastic.co/apm/module/apmelasticsearch/v2 -Version: v2.4.7 +Version: v2.4.8 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmelasticsearch/v2@v2.4.7/LICENSE: +Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmelasticsearch/v2@v2.4.8/LICENSE: Apache License Version 2.0, January 2004 diff 
--git a/go.mod b/go.mod index 635b9605ef74..af936d4063e7 100644 --- a/go.mod +++ b/go.mod @@ -219,7 +219,7 @@ require ( github.com/pkg/xattr v0.4.9 github.com/sergi/go-diff v1.3.1 github.com/shirou/gopsutil/v3 v3.22.10 - go.elastic.co/apm/module/apmelasticsearch/v2 v2.4.7 + go.elastic.co/apm/module/apmelasticsearch/v2 v2.4.8 go.elastic.co/apm/module/apmhttp/v2 v2.4.8 go.elastic.co/apm/v2 v2.4.8 go.mongodb.org/mongo-driver v1.5.1 diff --git a/go.sum b/go.sum index 7bbbdb029366..e238a72691a5 100644 --- a/go.sum +++ b/go.sum @@ -1920,8 +1920,8 @@ github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -go.elastic.co/apm/module/apmelasticsearch/v2 v2.4.7 h1:Gfp+wxPotE1s5X5ufn3W28zYaSunKEWE/x9Xo+5WzoI= -go.elastic.co/apm/module/apmelasticsearch/v2 v2.4.7/go.mod h1:taxw6rHuZtTF0p0DAv1xAg0fkvGprVIJu92JvbcgifU= +go.elastic.co/apm/module/apmelasticsearch/v2 v2.4.8 h1:4j3wI1e+WV6u+9ZR7lorkJI2rnJfjOWtkMeZG08PbRI= +go.elastic.co/apm/module/apmelasticsearch/v2 v2.4.8/go.mod h1:C9ajbSjZ3akTrFOjBr+pMq8bPVOH9vhIG+knZAuPW3s= go.elastic.co/apm/module/apmhttp/v2 v2.4.8 h1:C1piLq4wcFHFB7jpFW1WPEyEl6zbRpf9SAY8S8tX7Qk= go.elastic.co/apm/module/apmhttp/v2 v2.4.8/go.mod h1:FzO5ptAs5oKB3xE1/hpxMA0a7mLIycp2RZdBtp4+uTA= go.elastic.co/apm/v2 v2.4.8 h1:3RuqxDjhgumT1BYxJGnVPdGY12lCzF+KGyvtSTynCYo= From 74945355f9435e61cec4715057d6140dc4a28ab7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 Jan 2024 14:14:30 -0500 Subject: [PATCH 062/129] build(deps): bump protobuf in /libbeat/tests/system (#37662) Bumps [protobuf](https://github.com/protocolbuffers/protobuf) from 3.19.4 to 3.19.5. 
- [Release notes](https://github.com/protocolbuffers/protobuf/releases) - [Changelog](https://github.com/protocolbuffers/protobuf/blob/main/protobuf_release.bzl) - [Commits](https://github.com/protocolbuffers/protobuf/compare/v3.19.4...v3.19.5) --- updated-dependencies: - dependency-name: protobuf dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Craig MacKenzie --- libbeat/tests/system/requirements.txt | 2 +- libbeat/tests/system/requirements_aix.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libbeat/tests/system/requirements.txt b/libbeat/tests/system/requirements.txt index a9df181606d3..8bdb021e8ec5 100644 --- a/libbeat/tests/system/requirements.txt +++ b/libbeat/tests/system/requirements.txt @@ -45,7 +45,7 @@ packaging==20.4 parameterized==0.7.0 paramiko==3.4.0 pluggy==0.13.1 -protobuf==3.19.4 +protobuf==3.19.5 py==1.11.0 pycodestyle==2.6.0 pycparser==2.21 diff --git a/libbeat/tests/system/requirements_aix.txt b/libbeat/tests/system/requirements_aix.txt index a9df181606d3..8bdb021e8ec5 100644 --- a/libbeat/tests/system/requirements_aix.txt +++ b/libbeat/tests/system/requirements_aix.txt @@ -45,7 +45,7 @@ packaging==20.4 parameterized==0.7.0 paramiko==3.4.0 pluggy==0.13.1 -protobuf==3.19.4 +protobuf==3.19.5 py==1.11.0 pycodestyle==2.6.0 pycparser==2.21 From ca036402c80cf71c86fbfa9e72a6b3dc42c594bf Mon Sep 17 00:00:00 2001 From: Tetiana Kravchenko Date: Mon, 22 Jan 2024 11:06:19 +0100 Subject: [PATCH 063/129] Add missing kubernetes fields; update istio doc to use kubernetes.pod.ip instead of host (#37578) * add missing kubernetes fields; update istio doc to use kubernetes.pod.ip instead of host Signed-off-by: Tetiana Kravchenko * extend documentation with full list of fields; fix add_resource_metadata.deployment and add_resource_metadata.cronjob as the default value is false Signed-off-by: Tetiana Kravchenko * test all 
variables and adjust documentation accordingly Signed-off-by: Tetiana Kravchenko * Update libbeat/docs/shared-autodiscover.asciidoc Co-authored-by: Andrew Gizas * Update libbeat/docs/shared-autodiscover.asciidoc Co-authored-by: Andrew Gizas --------- Signed-off-by: Tetiana Kravchenko Co-authored-by: Andrew Gizas --- libbeat/docs/shared-autodiscover.asciidoc | 155 +++++++++++++++--- .../module/istio/proxy/_meta/docs.asciidoc | 2 +- 2 files changed, 133 insertions(+), 24 deletions(-) diff --git a/libbeat/docs/shared-autodiscover.asciidoc b/libbeat/docs/shared-autodiscover.asciidoc index ed116fd65959..e27c055cc951 100644 --- a/libbeat/docs/shared-autodiscover.asciidoc +++ b/libbeat/docs/shared-autodiscover.asciidoc @@ -178,8 +178,8 @@ Configuration parameters: * `node` or `namespace`: Specify labels and annotations filters for the extra metadata coming from node and namespace. By default all labels are included while annotations are not. To change default behaviour `include_labels`, `exclude_labels` and `include_annotations` can be defined. Those settings are useful when storing labels and annotations that require special handling to avoid overloading the storage output. Note: wildcards are not supported for those settings. The enrichment of `node` or `namespace` metadata can be individually disabled by setting `enabled: false`. - * `deployment`: If resource is `pod` and it is created from a `deployment`, by default the deployment name is added, this can be disabled by setting `deployment: false`. - * `cronjob`: If resource is `pod` and it is created from a `cronjob`, by default the cronjob name is added, this can be disabled by setting `cronjob: false`. + * `deployment`: If resource is `pod` and it is created from a `deployment`, by default the deployment name isn't added, this can be enabled by setting `deployment: true`. 
+ * `cronjob`: If resource is `pod` and it is created from a `cronjob`, by default the cronjob name isn't added, this can be enabled by setting `cronjob: true`. + Example: ["source","yaml",subs="attributes"] @@ -190,8 +190,8 @@ Example: node: include_labels: ["nodelabel2"] include_annotations: ["nodeannotation1"] - deployment: false - cronjob: false + # deployment: false + # cronjob: false ------------------------------------------------------------------------------------- `unique`:: (Optional) Defaults to `false`. Marking an autodiscover provider as unique results into @@ -203,39 +203,148 @@ Example: Different Beats that refer to the same leader lease will be competitors in holding the lease and only one will be elected as leader each time. -The configuration of templates and conditions is similar to that of the Docker provider. Configuration templates can -contain variables from the autodiscover event. They can be accessed under data namespace. +Configuration templates can contain variables from the autodiscover event. These variables can be accessed under the `data` +namespace, e.g. to access Pod IP: `${data.kubernetes.pod.ip}`. -These are the fields available within config templating. The `kubernetes.*` fields will be available on each emitted event. +These are the fields available within config templating. The `kubernetes.*` fields will be available on each emitted event: [float] ====== Generic fields: * host - * port (if exposed) - * kubernetes.labels - * kubernetes.annotations [float] ====== Pod specific: - * kubernetes.container.id - * kubernetes.container.image - * kubernetes.container.name - * kubernetes.namespace - * kubernetes.node.name - * kubernetes.pod.name - * kubernetes.pod.uid +|=== +|Key |Type |Description + +|`port` +|`string` +|Pod port. 
If pod has multiple ports exposed should be used `ports.` instead + +|`kubernetes.namespace` +|`string` +|Namespace, where the Pod is running + +|`kubernetes.namespace_uuid` +|`string` +|UUID of the Namespace, where the Pod is running + +|`kubernetes.namespace_annotations.*` +|`object` +|Annotations of the Namespace, where the Pod is running. Annotations should be used in not dedoted format, e.g. `kubernetes.namespace_annotations.app.kubernetes.io/name` + +|`kubernetes.pod.name` +|`string` +|Name of the Pod + +|`kubernetes.pod.uid` +|`string` +|UID of the Pod + +|`kubernetes.pod.ip` +|`string` +|IP of the Pod + +|`kubernetes.labels.*` +|`object` +|Object of the Pod labels. Labels should be used in not dedoted format, e.g. `kubernetes.labels.app.kubernetes.io/name` + +|`kubernetes.annotations.*` +|`object` +|Object of the Pod annotations. Annotations should be used in not dedoted format, e.g. `kubernetes.annotations.test.io/test` + +|`kubernetes.container.name` +|`string` +|Name of the container + +|`kubernetes.container.runtime` +|`string` +|Runtime of the container + +|`kubernetes.container.id` +|`string` +|ID of the container + +|`kubernetes.container.image` +|`string` +|Image of the container + +|`kubernetes.node.name` +|`string` +|Name of the Node + +|`kubernetes.node.uid` +|`string` +|UID of the Node + +|`kubernetes.node.hostname` +|`string` +|Hostname of the Node +|=== [float] ====== Node specific: - * kubernetes.node.name - * kubernetes.node.uid +|=== +|Key |Type |Description + +|`kubernetes.labels.*` +|`object` +|Object of labels of the Node + +|`kubernetes.annotations.*` +|`object` +|Object of annotations of the Node + +|`kubernetes.node.name` +|`string` +|Name of the Node + +|`kubernetes.node.uid` +|`string` +|UID of the Node + +|`kubernetes.node.hostname` +|`string` +|Hostname of the Node +|=== [float] ====== Service specific: - * kubernetes.namespace - * kubernetes.service.name - * kubernetes.service.uid - * kubernetes.annotations +|=== +|Key |Type 
|Description + +|`port` +|`string` +|Service port + +|`kubernetes.namespace` +|`string` +|Namespace of the Service + +|`kubernetes.namespace_uuid` +|`string` +|UUID of the Namespace of the Service + +|`kubernetes.namespace_annotations.*` +|`object` +|Annotations of the Namespace of the Service. Annotations should be used in not dedoted format, e.g. `kubernetes.namespace_annotations.app.kubernetes.io/name` + +|`kubernetes.labels.*` +|`object` +|Object of the Service labels + +|`kubernetes.annotations.*` +|`object` +|Object of the Service annotations + +|`kubernetes.service.name` +|`string` +|Name of the Service + +|`kubernetes.service.uid` +|`string` +|UID of the Service +|=== If the `include_annotations` config is added to the provider config, then the list of annotations present in the config are added to the event. diff --git a/x-pack/metricbeat/module/istio/proxy/_meta/docs.asciidoc b/x-pack/metricbeat/module/istio/proxy/_meta/docs.asciidoc index b3c5f3a8807c..06ae69d97e03 100644 --- a/x-pack/metricbeat/module/istio/proxy/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/istio/proxy/_meta/docs.asciidoc @@ -29,5 +29,5 @@ metricbeat.autodiscover: config: - module: istio metricsets: ["proxy"] - hosts: "${data.host}:15090" + hosts: "${data.kubernetes.pod.ip}:15090" -------------------------------------------- From c23411acc04b13d2576702530828f131cb3a22e1 Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Mon, 22 Jan 2024 07:15:19 -0700 Subject: [PATCH 064/129] Ignore @tmp directories in test (#37677) --- dev-tools/mage/gotest.go | 12 ++++----- .../fields/module_fields_collector.go | 9 ++++--- pytest.ini | 1 + x-pack/metricbeat/Jenkinsfile.yml | 25 +++++++++---------- 4 files changed, 24 insertions(+), 23 deletions(-) diff --git a/dev-tools/mage/gotest.go b/dev-tools/mage/gotest.go index 5065882fdb8f..bc49c3e643cd 100644 --- a/dev-tools/mage/gotest.go +++ b/dev-tools/mage/gotest.go @@ -22,7 +22,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "log" "os" "os/exec" @@ 
-169,15 +168,16 @@ func DefaultTestBinaryArgs() TestBinaryArgs { // Use MODULE=module to run only tests for `module`. func GoTestIntegrationForModule(ctx context.Context) error { module := EnvOr("MODULE", "") - modulesFileInfo, err := ioutil.ReadDir("./module") + modulesFileInfo, err := os.ReadDir("./module") if err != nil { return err } foundModule := false - failedModules := []string{} + failedModules := make([]string, 0, len(modulesFileInfo)) for _, fi := range modulesFileInfo { - if !fi.IsDir() { + // skip the ones that are not directories or with suffix @tmp, which are created by Jenkins build job + if !fi.IsDir() || strings.HasSuffix(fi.Name(), "@tmp") { continue } if module != "" && module != fi.Name() { @@ -289,7 +289,7 @@ func GoTest(ctx context.Context, params GoTestArgs) error { } if params.OutputFile != "" { - fileOutput, err := os.Create(createDir(params.OutputFile)) + fileOutput, err := os.Create(CreateDir(params.OutputFile)) if err != nil { return fmt.Errorf("failed to create go test output file: %w", err) } @@ -356,7 +356,7 @@ func makeCommand(ctx context.Context, env map[string]string, cmd string, args .. 
for k, v := range env { c.Env = append(c.Env, k+"="+v) } - c.Stdout = ioutil.Discard + c.Stdout = io.Discard if mg.Verbose() { c.Stdout = os.Stdout } diff --git a/libbeat/generator/fields/module_fields_collector.go b/libbeat/generator/fields/module_fields_collector.go index 85f917cacf60..2bf7c7190d43 100644 --- a/libbeat/generator/fields/module_fields_collector.go +++ b/libbeat/generator/fields/module_fields_collector.go @@ -18,9 +18,9 @@ package fields import ( - "io/ioutil" "os" "path/filepath" + "strings" ) var indentByModule = map[string]int{ @@ -38,9 +38,10 @@ func GetModules(modulesDir string) ([]string, error) { return nil, err } - var names []string + names := make([]string, 0, len(moduleInfos)) for _, info := range moduleInfos { - if !info.IsDir() { + // skip the ones that are not directories or with suffix @tmp, which are created by Jenkins build job + if !info.IsDir() || strings.HasSuffix(info.Name(), "@tmp") { continue } names = append(names, info.Name()) @@ -80,7 +81,7 @@ func CollectFiles(module string, modulesPath string) ([]*YmlFile, error) { files = append(files, ymls...) modulesRoot := filepath.Base(modulesPath) - sets, err := ioutil.ReadDir(filepath.Join(modulesPath, module)) + sets, err := os.ReadDir(filepath.Join(modulesPath, module)) if err != nil { return nil, err } diff --git a/pytest.ini b/pytest.ini index 5112e7736685..c8a34025f6f0 100644 --- a/pytest.ini +++ b/pytest.ini @@ -17,3 +17,4 @@ filterwarnings = ignore:distutils Version classes are deprecated. Use packaging.version instead.:DeprecationWarning:.*compose.* ignore:distutils Version classes are deprecated. Use packaging.version instead.:DeprecationWarning:.*docker.* ignore:HTTPResponse.getheaders\(\) is deprecated and will be removed in urllib3 v2.1.0. 
Instead access HTTPResponse.headers directly.:DeprecationWarning + ignore:The 'warn' method is deprecated, use 'warning' instead:DeprecationWarning diff --git a/x-pack/metricbeat/Jenkinsfile.yml b/x-pack/metricbeat/Jenkinsfile.yml index f9fbd76a6ce5..27574b62adad 100644 --- a/x-pack/metricbeat/Jenkinsfile.yml +++ b/x-pack/metricbeat/Jenkinsfile.yml @@ -24,19 +24,18 @@ stages: mage: "mage pythonIntegTest" withModule: true stage: mandatory - # Skip test until fixed https://github.com/elastic/beats/issues/37498 - #cloud: - # cloud: "mage build test" - # withModule: true ## run the ITs only if the changeset affects a specific module. - # dirs: ## run the cloud tests for the given modules. - # - "x-pack/metricbeat/module/aws" - # when: ## Override the top-level when. - # parameters: - # - "awsCloudTests" - # comments: - # - "/test x-pack/metricbeat for aws cloud" - # labels: - # - "aws" + cloud: + cloud: "mage build test" + withModule: true ## run the ITs only if the changeset affects a specific module. + dirs: ## run the cloud tests for the given modules. + - "x-pack/metricbeat/module/aws" + when: ## Override the top-level when. 
+ parameters: + - "awsCloudTests" + comments: + - "/test x-pack/metricbeat for aws cloud" + labels: + - "aws" # stage: extended # Skip test until fixed https://github.com/elastic/beats/issues/36425 #cloudAWS: From afbba842e18b140fb9a2f4b469ad32d62a82db7e Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 22 Jan 2024 10:39:31 -0500 Subject: [PATCH 065/129] chore: Update snapshot.yml (#37674) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index f13e4d18bb36..0901d9638f64 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-knq1zsti-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-l534sdis-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-knq1zsti-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-l534sdis-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-knq1zsti-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-l534sdis-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From b2e2ecc36c5415d5f85d386233c963e05763a64d Mon Sep 17 00:00:00 2001 From: sharbuz <87968844+sharbuz@users.noreply.github.com> Date: Tue, 23 Jan 2024 
16:00:12 +0200 Subject: [PATCH 066/129] libbeat and packetbeat buildkite pipelines init (#37669) * libbeat and packetbeat buildkite pipelines init * add blank pipelines * fix remarks --- .buildkite/libbeat/pipeline.libbeat.yml | 5 ++ .buildkite/packetbeat/pipeline.packetbeat.yml | 5 ++ .buildkite/pull-requests.json | 32 +++++++ catalog-info.yaml | 90 +++++++++++++++++++ 4 files changed, 132 insertions(+) create mode 100644 .buildkite/libbeat/pipeline.libbeat.yml create mode 100644 .buildkite/packetbeat/pipeline.packetbeat.yml diff --git a/.buildkite/libbeat/pipeline.libbeat.yml b/.buildkite/libbeat/pipeline.libbeat.yml new file mode 100644 index 000000000000..34321b61161b --- /dev/null +++ b/.buildkite/libbeat/pipeline.libbeat.yml @@ -0,0 +1,5 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json + +steps: + - label: "Example test" + command: echo "Hello!" diff --git a/.buildkite/packetbeat/pipeline.packetbeat.yml b/.buildkite/packetbeat/pipeline.packetbeat.yml new file mode 100644 index 000000000000..34321b61161b --- /dev/null +++ b/.buildkite/packetbeat/pipeline.packetbeat.yml @@ -0,0 +1,5 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json + +steps: + - label: "Example test" + command: echo "Hello!" 
diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index 43d8974f3bf6..cc8ff9ab7a52 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -95,6 +95,38 @@ "skip_target_branches": [ ], "skip_ci_on_only_changed": [ ], "always_require_ci_on_changed": [ "^deploy/kubernetes/.*", ".buildkite/deploy/kubernetes/.*", "^libbeat/docs/version.asciidoc"] + }, + { + "enabled": true, + "pipelineSlug": "beats-libbeat", + "allow_org_users": true, + "allowed_repo_permissions": ["admin", "write"], + "allowed_list": [ ], + "set_commit_status": true, + "build_on_commit": true, + "build_on_comment": true, + "trigger_comment_regex": "^/test libbeat$|^/test filebeat", + "always_trigger_comment_regex": "^/test libbeat$", + "skip_ci_labels": [ ], + "skip_target_branches": [ ], + "skip_ci_on_only_changed": [ ], + "always_require_ci_on_changed": ["^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", "^testing/.*"] + }, + { + "enabled": true, + "pipelineSlug": "beats-packetbeat", + "allow_org_users": true, + "allowed_repo_permissions": ["admin", "write"], + "allowed_list": [ ], + "set_commit_status": true, + "build_on_commit": true, + "build_on_comment": true, + "trigger_comment_regex": "^/test packetbeat$", + "always_trigger_comment_regex": "^/test packetbeat$", + "skip_ci_labels": [ ], + "skip_target_branches": [ ], + "skip_ci_on_only_changed": [ ], + "always_require_ci_on_changed": ["^packetbeat/.*", ".buildkite/packetbeat/.*", "^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", "^testing/.*"] } ] } diff --git a/catalog-info.yaml b/catalog-info.yaml index 92757fd4c134..77ad8d8ae03b 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -287,3 +287,93 @@ spec: access_level: MANAGE_BUILD_AND_READ everyone: access_level: READ_ONLY + +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: 
backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-beats-libbeat + description: "Beats libbeat pipeline" + links: + - title: Pipeline + url: https://buildkite.com/elastic/beats-libbeat + +spec: + type: buildkite-pipeline + owner: group:ingest-fp + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: beats-libbeat + description: "Beats libbeat pipeline" + spec: +# branch_configuration: "main 7.17 8.* v7.17 v8.*" TODO: temporarily commented to build PRs from forks + pipeline_file: ".buildkite/libbeat/pipeline.libbeat.yml" +# maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline is ready + provider_settings: + build_pull_request_forks: false + build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot + build_tags: true + filter_enabled: true + filter_condition: >- + build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null) + repository: elastic/beats + cancel_intermediate_builds: true + cancel_intermediate_builds_branch_filter: "!main !7.17 !8.*" + skip_intermediate_builds: true + skip_intermediate_builds_branch_filter: "!main !7.17 !8.*" + # env: + # ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is ready + teams: + ingest-fp: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: READ_ONLY + +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-beats-packetbeat + description: "Beats packetbeat pipeline" + links: + - title: Pipeline + url: https://buildkite.com/elastic/beats-libbeat + +spec: + type: buildkite-pipeline + owner: group:ingest-fp + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + 
kind: Pipeline + metadata: + name: beats-packetbeat + description: "Beats packetbeat pipeline" + spec: +# branch_configuration: "main 7.17 8.* v7.17 v8.*" TODO: temporarily commented to build PRs from forks + pipeline_file: ".buildkite/libbeat/pipeline.packetbeat.yml" +# maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline is ready + provider_settings: + build_pull_request_forks: false + build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot + build_tags: true + filter_enabled: true + filter_condition: >- + build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null) + repository: elastic/beats + cancel_intermediate_builds: true + cancel_intermediate_builds_branch_filter: "!main !7.17 !8.*" + skip_intermediate_builds: true + skip_intermediate_builds_branch_filter: "!main !7.17 !8.*" + # env: + # ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is ready + teams: + ingest-fp: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: READ_ONLY From 6383c565ffb3ea90edeaedca1deeb818edd226a8 Mon Sep 17 00:00:00 2001 From: sharbuz <87968844+sharbuz@users.noreply.github.com> Date: Tue, 23 Jan 2024 17:45:27 +0200 Subject: [PATCH 067/129] fix the packetbeat bk pipeline init config (#37706) * fix libbeat bk pipeline init * fix packetbeat bk pipeline init --- catalog-info.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/catalog-info.yaml b/catalog-info.yaml index 77ad8d8ae03b..4d6c956f1f3f 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -356,7 +356,7 @@ spec: description: "Beats packetbeat pipeline" spec: # branch_configuration: "main 7.17 8.* v7.17 v8.*" TODO: temporarily commented to build PRs from forks - pipeline_file: ".buildkite/libbeat/pipeline.packetbeat.yml" + pipeline_file: ".buildkite/packetbeat/pipeline.packetbeat.yml" # maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline is 
ready provider_settings: build_pull_request_forks: false From 3bf2a825e91ca68f6776d4794bb30ded5b75cd91 Mon Sep 17 00:00:00 2001 From: sharbuz <87968844+sharbuz@users.noreply.github.com> Date: Wed, 24 Jan 2024 12:39:58 +0200 Subject: [PATCH 068/129] temporary desable filesystem check for MacOS unit-tests (#37687) --- metricbeat/module/system/test_system.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metricbeat/module/system/test_system.py b/metricbeat/module/system/test_system.py index b1c21d698ff3..de113e5e4b0e 100644 --- a/metricbeat/module/system/test_system.py +++ b/metricbeat/module/system/test_system.py @@ -258,7 +258,7 @@ def test_diskio(self): self.assertCountEqual( SYSTEM_DISK_HOST_FIELDS, host_disk.keys()) - @unittest.skipUnless(re.match("(?i)win|linux|darwin|freebsd|openbsd", sys.platform), "os") + @unittest.skipUnless(re.match("(?i)win|linux|freebsd|openbsd", sys.platform), "os") def test_filesystem(self): """ Test system/filesystem output. From 90228e66040f055f8bcd5395e7161a3d3f26018e Mon Sep 17 00:00:00 2001 From: Craig MacKenzie Date: Wed, 24 Jan 2024 09:02:39 -0500 Subject: [PATCH 069/129] Update to protobuf 3.19.5 everywhere. 
(#37711) --- auditbeat/tests/system/requirements.txt | 2 +- dev-tools/requirements.txt | 2 +- heartbeat/tests/system/requirements.txt | 2 +- .../module/kubernetes/_meta/terraform/eks/requirements.txt | 2 +- metricbeat/tests/system/requirements.txt | 2 +- packetbeat/tests/system/gen/memcache/requirements.txt | 2 +- x-pack/functionbeat/tests/system/requirements.txt | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/auditbeat/tests/system/requirements.txt b/auditbeat/tests/system/requirements.txt index 553625fe698c..c2399b66f80b 100644 --- a/auditbeat/tests/system/requirements.txt +++ b/auditbeat/tests/system/requirements.txt @@ -1 +1 @@ -protobuf==3.19.4 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 diff --git a/dev-tools/requirements.txt b/dev-tools/requirements.txt index e9d917548f16..f69927dbe3c6 100644 --- a/dev-tools/requirements.txt +++ b/dev-tools/requirements.txt @@ -1,3 +1,3 @@ elasticsearch requests -protobuf==3.19.4 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 diff --git a/heartbeat/tests/system/requirements.txt b/heartbeat/tests/system/requirements.txt index 553625fe698c..c2399b66f80b 100644 --- a/heartbeat/tests/system/requirements.txt +++ b/heartbeat/tests/system/requirements.txt @@ -1 +1 @@ -protobuf==3.19.4 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 diff --git a/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt 
b/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt index 57d386ba6a0a..7402ff16caa1 100644 --- a/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt +++ b/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt @@ -10,4 +10,4 @@ rsa==4.7.2 s3transfer==0.3.3 six==1.14.0 urllib3==1.26.5 -protobuf==3.19.4 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 diff --git a/metricbeat/tests/system/requirements.txt b/metricbeat/tests/system/requirements.txt index 711013bb2f88..98713863fc0d 100644 --- a/metricbeat/tests/system/requirements.txt +++ b/metricbeat/tests/system/requirements.txt @@ -1,4 +1,4 @@ kafka-python==1.4.3 elasticsearch==7.1.0 semver==2.8.1 -protobuf==3.19.4 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 diff --git a/packetbeat/tests/system/gen/memcache/requirements.txt b/packetbeat/tests/system/gen/memcache/requirements.txt index 346c9010386f..1666df74b640 100644 --- a/packetbeat/tests/system/gen/memcache/requirements.txt +++ b/packetbeat/tests/system/gen/memcache/requirements.txt @@ -1,2 +1,2 @@ pylibmc -protobuf==3.19.4 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 diff --git a/x-pack/functionbeat/tests/system/requirements.txt b/x-pack/functionbeat/tests/system/requirements.txt index 553625fe698c..c2399b66f80b 100644 --- a/x-pack/functionbeat/tests/system/requirements.txt +++ b/x-pack/functionbeat/tests/system/requirements.txt @@ -1 +1 @@ 
-protobuf==3.19.4 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 +protobuf==3.19.5 #Temporary change because of protobuf new version bug: https://github.com/protocolbuffers/protobuf/issues/10051 From ebd85124f0a7b98604cc3380a52d269d59150216 Mon Sep 17 00:00:00 2001 From: Andrew Kroh Date: Wed, 24 Jan 2024 15:32:47 -0500 Subject: [PATCH 070/129] package_test.go - Support OCI Image Layout (#37727) Modify TestDocker such that is can read both the original docker image layout and the OCI Image Layout. This works by reading the config and layer file names from the manifest.yml instead of assuming their names. Fixes #37726 --- dev-tools/packaging/package_test.go | 69 ++++++++++++++++++++++------- 1 file changed, 54 insertions(+), 15 deletions(-) diff --git a/dev-tools/packaging/package_test.go b/dev-tools/packaging/package_test.go index 363e6d904d4a..e01b6c566e5a 100644 --- a/dev-tools/packaging/package_test.go +++ b/dev-tools/packaging/package_test.go @@ -30,10 +30,10 @@ import ( "flag" "fmt" "io" - "io/ioutil" "os" "path/filepath" "regexp" + "slices" "strings" "testing" @@ -106,6 +106,7 @@ func TestZip(t *testing.T) { func TestDocker(t *testing.T) { dockers := getFiles(t, regexp.MustCompile(`\.docker\.tar\.gz$`)) for _, docker := range dockers { + t.Log(docker) checkDocker(t, docker) } } @@ -713,13 +714,19 @@ func readZip(t *testing.T, zipFile string, inspectors ...inspector) (*packageFil } func readDocker(dockerFile string) (*packageFile, *dockerInfo, error) { + // Read the manifest file first so that the config file and layer + // names are known in advance. 
+ manifest, err := getDockerManifest(dockerFile) + if err != nil { + return nil, nil, err + } + file, err := os.Open(dockerFile) if err != nil { return nil, nil, err } defer file.Close() - var manifest *dockerManifest var info *dockerInfo layers := make(map[string]*packageFile) @@ -740,22 +747,17 @@ func readDocker(dockerFile string) (*packageFile, *dockerInfo, error) { } switch { - case header.Name == "manifest.json": - manifest, err = readDockerManifest(tarReader) - if err != nil { - return nil, nil, err - } - case strings.HasSuffix(header.Name, ".json") && header.Name != "manifest.json": + case header.Name == manifest.Config: info, err = readDockerInfo(tarReader) if err != nil { return nil, nil, err } - case strings.HasSuffix(header.Name, "/layer.tar"): + case slices.Contains(manifest.Layers, header.Name): layer, err := readTarContents(header.Name, tarReader) if err != nil { return nil, nil, err } - layers[filepath.Dir(header.Name)] = layer + layers[header.Name] = layer } } @@ -769,10 +771,9 @@ func readDocker(dockerFile string) (*packageFile, *dockerInfo, error) { // Read layers in order and for each file keep only the entry seen in the later layer p := &packageFile{Name: filepath.Base(dockerFile), Contents: map[string]packageEntry{}} for _, layer := range manifest.Layers { - layerID := filepath.Dir(layer) - layerFile, found := layers[layerID] + layerFile, found := layers[layer] if !found { - return nil, nil, fmt.Errorf("layer not found: %s", layerID) + return nil, nil, fmt.Errorf("layer not found: %s", layer) } for name, entry := range layerFile.Contents { // Check only files in working dir and entrypoint @@ -798,6 +799,44 @@ func readDocker(dockerFile string) (*packageFile, *dockerInfo, error) { return p, info, nil } +// getDockerManifest opens a gzipped tar file to read the Docker manifest.json +// that it is expected to contain. 
+func getDockerManifest(file string) (*dockerManifest, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + gzipReader, err := gzip.NewReader(f) + if err != nil { + return nil, err + } + defer gzipReader.Close() + + var manifest *dockerManifest + tarReader := tar.NewReader(gzipReader) + for { + header, err := tarReader.Next() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return nil, err + } + + if header.Name == "manifest.json" { + manifest, err = readDockerManifest(tarReader) + if err != nil { + return nil, err + } + break + } + } + + return manifest, nil +} + type dockerManifest struct { Config string RepoTags []string @@ -805,7 +844,7 @@ type dockerManifest struct { } func readDockerManifest(r io.Reader) (*dockerManifest, error) { - data, err := ioutil.ReadAll(r) + data, err := io.ReadAll(r) if err != nil { return nil, err } @@ -833,7 +872,7 @@ type dockerInfo struct { } func readDockerInfo(r io.Reader) (*dockerInfo, error) { - data, err := ioutil.ReadAll(r) + data, err := io.ReadAll(r) if err != nil { return nil, err } From ac6917c54ba3f3115b2ac8514d2aaab0997b9fdf Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Thu, 25 Jan 2024 13:42:37 +1030 Subject: [PATCH 071/129] x-pack/filebeat/input/cel: update mito version to v1.8.0 (#37718) This change adds support for runtime error location reporting. 
--- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 58 ++++++++++--------- go.mod | 6 +- go.sum | 12 ++-- .../filebeat/docs/inputs/input-cel.asciidoc | 2 +- x-pack/filebeat/input/cel/config.go | 2 +- x-pack/filebeat/input/cel/input.go | 21 ++++--- x-pack/filebeat/input/cel/input_test.go | 5 +- 8 files changed, 58 insertions(+), 49 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index c6b4b4e6e4f3..0a34b39ec7b2 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -167,6 +167,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - Update CEL extensions library to v1.7.0. {pull}37172[37172] - Add support for complete URL replacement in HTTPJSON chain steps. {pull}37486[37486] - Add support for user-defined query selection in EntraID entity analytics provider. {pull}37653[37653] +- Update CEL extensions library to v1.8.0 to provide runtime error location reporting. {issue}37304[37304] {pull}37718[37718] *Auditbeat* diff --git a/NOTICE.txt b/NOTICE.txt index c7e77209c2c2..dca30dc3f419 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -15589,11 +15589,11 @@ limitations under the License. 
-------------------------------------------------------------------------------- Dependency : github.com/elastic/mito -Version: v1.7.0 +Version: v1.8.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/mito@v1.7.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/mito@v1.8.0/LICENSE: Apache License @@ -17538,11 +17538,11 @@ Contents of probable licence file $GOMODCACHE/github.com/gomodule/redigo@v1.8.3/ -------------------------------------------------------------------------------- Dependency : github.com/google/cel-go -Version: v0.17.7 +Version: v0.19.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/google/cel-go@v0.17.7/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/google/cel-go@v0.19.0/LICENSE: Apache License @@ -31307,39 +31307,41 @@ THE SOFTWARE. -------------------------------------------------------------------------------- -Dependency : github.com/antlr/antlr4/runtime/Go/antlr/v4 -Version: v4.0.0-20230305170008-8188dc5388df +Dependency : github.com/antlr4-go/antlr/v4 +Version: v4.13.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/antlr/antlr4/runtime/!go/antlr/v4@v4.0.0-20230305170008-8188dc5388df/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/antlr4-go/antlr/v4@v4.13.0/LICENSE: -Copyright 2021 The ANTLR Project +Copyright (c) 2012-2023 The ANTLR Project. All rights reserved. 
-Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. - 3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. +3. Neither name of copyright holders nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- diff --git a/go.mod b/go.mod index af936d4063e7..4d023a1cc13a 100644 --- a/go.mod +++ b/go.mod @@ -205,10 +205,10 @@ require ( github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 github.com/elastic/elastic-agent-system-metrics v0.9.1 github.com/elastic/go-elasticsearch/v8 v8.11.1 - github.com/elastic/mito v1.7.0 + github.com/elastic/mito v1.8.0 github.com/elastic/toutoumomoma v0.0.0-20221026030040-594ef30cb640 github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15 - github.com/google/cel-go v0.17.7 + github.com/google/cel-go v0.19.0 github.com/googleapis/gax-go/v2 v2.12.0 github.com/gorilla/handlers v1.5.1 github.com/gorilla/mux v1.8.0 @@ -247,7 +247,7 @@ require ( github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/andybalholm/brotli v1.0.5 // indirect - github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/apache/arrow/go/v12 v12.0.0 // indirect github.com/apache/thrift v0.19.0 // indirect github.com/armon/go-radix v1.0.0 // indirect diff --git a/go.sum b/go.sum index e238a72691a5..894ff70759ad 100644 --- a/go.sum +++ b/go.sum @@ -248,8 +248,8 @@ github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/ github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= -github.com/antlr/antlr4/runtime/Go/antlr/v4 
v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= @@ -703,8 +703,8 @@ github.com/elastic/gopacket v1.1.20-0.20211202005954-d412fca7f83a h1:8WfL/X6fK11 github.com/elastic/gopacket v1.1.20-0.20211202005954-d412fca7f83a/go.mod h1:riddUzxTSBpJXk3qBHtYr4qOhFhT6k/1c0E3qkQjQpA= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/elastic/mito v1.7.0 h1:cb4/z7Pt1Sonw92ucUMPcfbzX8MC+b6Hvf4ZMBJWg74= -github.com/elastic/mito v1.7.0/go.mod h1:nh7WSVimSs4d0N9Zakw+ZNOZL0wKl+jmQLT49JLxRQs= +github.com/elastic/mito v1.8.0 h1:i3GOtcnNuEEH2XMqnQdPvNjIBA8m0VKuTTfvusfCfnU= +github.com/elastic/mito v1.8.0/go.mod h1:n7AvUVtYQQXb8fq87FI8z67TNzuhwBV3kHBkDT1qJYQ= github.com/elastic/ristretto v0.1.1-0.20220602190459-83b0895ca5b3 h1:ChPwRVv1RR4a0cxoGjKcyWjTEpxYfm5gydMIzo32cAw= github.com/elastic/ristretto v0.1.1-0.20220602190459-83b0895ca5b3/go.mod h1:RAy2GVV4sTWVlNMavv3xhLsk18rxhfhDnombTe6EF5c= github.com/elastic/sarama v1.19.1-0.20220310193331-ebc2b0d8eef3 h1:FzA0/n4iMt8ojGDGRoiFPSHFvvdVIvxOxyLtiFnrLBM= @@ -1027,8 +1027,8 @@ github.com/gomodule/redigo v1.8.3/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUz github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= -github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/cel-go v0.19.0 h1:vVgaZoHPBDd1lXCYGQOh5A06L4EtuIfmqQ/qnSXSKiU= +github.com/google/cel-go v0.19.0/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= diff --git a/x-pack/filebeat/docs/inputs/input-cel.asciidoc b/x-pack/filebeat/docs/inputs/input-cel.asciidoc index a2512580169d..837ea80ea1ee 100644 --- a/x-pack/filebeat/docs/inputs/input-cel.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-cel.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] :type: cel -:mito_version: v1.7.0 +:mito_version: v1.8.0 :mito_docs: https://pkg.go.dev/github.com/elastic/mito@{mito_version} [id="{beatname_lc}-input-{type}"] diff --git a/x-pack/filebeat/input/cel/config.go b/x-pack/filebeat/input/cel/config.go index 94b41190fa6c..3ce271afaa3c 100644 --- a/x-pack/filebeat/input/cel/config.go +++ b/x-pack/filebeat/input/cel/config.go @@ -89,7 +89,7 @@ func (c config) Validate() error { if len(c.Regexps) != 0 { patterns = map[string]*regexp.Regexp{".": nil} } - _, err = newProgram(context.Background(), c.Program, root, client, nil, nil, patterns, c.XSDs, logp.L().Named("input.cel"), nil) + _, _, err = newProgram(context.Background(), c.Program, root, client, nil, nil, patterns, c.XSDs, logp.L().Named("input.cel"), nil) if err != nil { return fmt.Errorf("failed to check program: %w", err) } diff --git a/x-pack/filebeat/input/cel/input.go b/x-pack/filebeat/input/cel/input.go index e90ee60535a3..420c61a1e645 100644 --- 
a/x-pack/filebeat/input/cel/input.go +++ b/x-pack/filebeat/input/cel/input.go @@ -151,7 +151,7 @@ func (i input) run(env v2.Context, src *source, cursor map[string]interface{}, p Password: cfg.Auth.Basic.Password, } } - prg, err := newProgram(ctx, cfg.Program, root, client, limiter, auth, patterns, cfg.XSDs, log, trace) + prg, ast, err := newProgram(ctx, cfg.Program, root, client, limiter, auth, patterns, cfg.XSDs, log, trace) if err != nil { return err } @@ -233,7 +233,7 @@ func (i input) run(env v2.Context, src *source, cursor map[string]interface{}, p log.Debugw("request state", logp.Namespace("cel"), "state", redactor{state: state, cfg: cfg.Redact}) metrics.executions.Add(1) start := i.now().In(time.UTC) - state, err = evalWith(ctx, prg, state, start) + state, err = evalWith(ctx, prg, ast, state, start) log.Debugw("response state", logp.Namespace("cel"), "state", redactor{state: state, cfg: cfg.Redact}) if err != nil { switch { @@ -898,10 +898,10 @@ var ( } ) -func newProgram(ctx context.Context, src, root string, client *http.Client, limiter *rate.Limiter, auth *lib.BasicAuth, patterns map[string]*regexp.Regexp, xsd map[string]string, log *logp.Logger, trace *httplog.LoggingRoundTripper) (cel.Program, error) { +func newProgram(ctx context.Context, src, root string, client *http.Client, limiter *rate.Limiter, auth *lib.BasicAuth, patterns map[string]*regexp.Regexp, xsd map[string]string, log *logp.Logger, trace *httplog.LoggingRoundTripper) (cel.Program, *cel.Ast, error) { xml, err := lib.XML(nil, xsd) if err != nil { - return nil, fmt.Errorf("failed to build xml type hints: %w", err) + return nil, nil, fmt.Errorf("failed to build xml type hints: %w", err) } opts := []cel.EnvOption{ cel.Declarations(decls.NewVar(root, decls.Dyn)), @@ -930,19 +930,19 @@ func newProgram(ctx context.Context, src, root string, client *http.Client, limi } env, err := cel.NewEnv(opts...) 
if err != nil { - return nil, fmt.Errorf("failed to create env: %w", err) + return nil, nil, fmt.Errorf("failed to create env: %w", err) } ast, iss := env.Compile(src) if iss.Err() != nil { - return nil, fmt.Errorf("failed compilation: %w", iss.Err()) + return nil, nil, fmt.Errorf("failed compilation: %w", iss.Err()) } prg, err := env.Program(ast) if err != nil { - return nil, fmt.Errorf("failed program instantiation: %w", err) + return nil, nil, fmt.Errorf("failed program instantiation: %w", err) } - return prg, nil + return prg, ast, nil } func debug(log *logp.Logger, trace *httplog.LoggingRoundTripper) func(string, any) { @@ -960,7 +960,7 @@ func debug(log *logp.Logger, trace *httplog.LoggingRoundTripper) func(string, an } } -func evalWith(ctx context.Context, prg cel.Program, state map[string]interface{}, now time.Time) (map[string]interface{}, error) { +func evalWith(ctx context.Context, prg cel.Program, ast *cel.Ast, state map[string]interface{}, now time.Time) (map[string]interface{}, error) { out, _, err := prg.ContextEval(ctx, map[string]interface{}{ // Replace global program "now" with current time. This is necessary // as the lib.Time now global is static at program instantiation time @@ -974,6 +974,9 @@ func evalWith(ctx context.Context, prg cel.Program, state map[string]interface{} "now": now, root: state, }) + if err != nil { + err = lib.DecoratedError{AST: ast, Err: err} + } if e := ctx.Err(); e != nil { err = e } diff --git a/x-pack/filebeat/input/cel/input_test.go b/x-pack/filebeat/input/cel/input_test.go index c3d31f6ef627..1ee7704f8263 100644 --- a/x-pack/filebeat/input/cel/input_test.go +++ b/x-pack/filebeat/input/cel/input_test.go @@ -1333,7 +1333,10 @@ var inputTests = []struct { want: []map[string]interface{}{ { "error": map[string]interface{}{ - "message": "failed eval: no such overload", // This is the best we get for some errors from CEL. + // This is the best we get for some errors from CEL. 
+ "message": `failed eval: ERROR: :3:56: no such overload + | bytes(get(state.url+'/'+r.id).Body).decode_json()).as(events, { + | .......................................................^`, }, }, }, From d1912a512a44b477eb43b6f733a3df19250ef9b8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 Jan 2024 04:33:44 +0000 Subject: [PATCH 072/129] build(deps): bump github.com/elastic/elastic-agent-libs from 0.7.3 to 0.7.4 (#37641) * build(deps): bump github.com/elastic/elastic-agent-libs Bumps [github.com/elastic/elastic-agent-libs](https://github.com/elastic/elastic-agent-libs) from 0.7.3 to 0.7.4. - [Release notes](https://github.com/elastic/elastic-agent-libs/releases) - [Commits](https://github.com/elastic/elastic-agent-libs/compare/v0.7.3...v0.7.4) --- updated-dependencies: - dependency-name: github.com/elastic/elastic-agent-libs dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update NOTICE.txt * Fix tests * fix typo * Fix linter --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: michel-laterman --- NOTICE.txt | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- x-pack/filebeat/input/lumberjack/server_test.go | 10 ++++++---- 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index dca30dc3f419..ac9818f77d37 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -12701,11 +12701,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.7.3 +Version: v0.7.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.7.3/LICENSE: +Contents of probable licence file 
$GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.7.4/LICENSE: Apache License Version 2.0, January 2004 diff --git a/go.mod b/go.mod index 4d023a1cc13a..d003eeca9219 100644 --- a/go.mod +++ b/go.mod @@ -201,7 +201,7 @@ require ( github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5 github.com/elastic/bayeux v1.0.5 github.com/elastic/elastic-agent-autodiscover v0.6.7 - github.com/elastic/elastic-agent-libs v0.7.3 + github.com/elastic/elastic-agent-libs v0.7.4 github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 github.com/elastic/elastic-agent-system-metrics v0.9.1 github.com/elastic/go-elasticsearch/v8 v8.11.1 diff --git a/go.sum b/go.sum index 894ff70759ad..043ad9afca0a 100644 --- a/go.sum +++ b/go.sum @@ -662,8 +662,8 @@ github.com/elastic/elastic-agent-autodiscover v0.6.7 h1:+KVjltN0rPsBrU8b156gV4lO github.com/elastic/elastic-agent-autodiscover v0.6.7/go.mod h1:hFeFqneS2r4jD0/QzGkrNk0YVdN0JGh7lCWdsH7zcI4= github.com/elastic/elastic-agent-client/v7 v7.6.0 h1:FEn6FjzynW4TIQo5G096Tr7xYK/P5LY9cSS6wRbXZTc= github.com/elastic/elastic-agent-client/v7 v7.6.0/go.mod h1:GlUKrbVd/O1CRAZonpBeN3J0RlVqP6VGcrBjFWca+aM= -github.com/elastic/elastic-agent-libs v0.7.3 h1:tc6JDXYR+2XFMHJVv+7+M0OwAbZPxm3caLJEd943dlE= -github.com/elastic/elastic-agent-libs v0.7.3/go.mod h1:9hlSaDPm0XTrUWrZjwvckgov1pDHnsGyybzAjNe/1wA= +github.com/elastic/elastic-agent-libs v0.7.4 h1:/cmwOLwNAyJDNeR6sFIbHCDHDLPX2zAb/MAxQq7BRpo= +github.com/elastic/elastic-agent-libs v0.7.4/go.mod h1:pGMj5myawdqu+xE+WKvM5FQzKQ/MonikkWOzoFTJxaU= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 h1:sb+25XJn/JcC9/VL8HX4r4QXSUq4uTNzGS2kxOE7u1U= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3/go.mod h1:rWarFM7qYxJKsi9WcV6ONcFjH/NA3niDNpTxO+8/GVI= github.com/elastic/elastic-agent-system-metrics v0.9.1 h1:r0ofKHgPpl+W09ie7tzGcCDC0d4NZbQUv37rSgHf4FM= diff --git 
a/x-pack/filebeat/input/lumberjack/server_test.go b/x-pack/filebeat/input/lumberjack/server_test.go index 1c2aa2de3e08..c7db6abf0b54 100644 --- a/x-pack/filebeat/input/lumberjack/server_test.go +++ b/x-pack/filebeat/input/lumberjack/server_test.go @@ -52,7 +52,8 @@ func TestServer(t *testing.T) { c := makeTestConfig() c.TLS = serverConf // Disable mTLS requirements in the server. - c.TLS.ClientAuth = 0 // tls.NoClientCert + var clientAuth = tlscommon.TLSClientAuthNone + c.TLS.ClientAuth = &clientAuth c.TLS.VerificationMode = tlscommon.VerifyNone testSendReceive(t, c, 10, clientConf) @@ -127,7 +128,7 @@ func sendData(ctx context.Context, t testing.TB, bindAddress string, numberOfEve }() t.Log("Lumberjack client connected.") - var events []interface{} + events := make([]interface{}, 0, numberOfEvents) for i := 0; i < numberOfEvents; i++ { events = append(events, map[string]interface{}{ "message": "hello world!", @@ -220,11 +221,12 @@ func tlsSetup(t *testing.T) (clientConfig *tls.Config, serverConfig *tlscommon.S MinVersion: tls.VersionTLS12, } + var clientAuth = tlscommon.TLSClientAuthRequired + serverConfig = &tlscommon.ServerConfig{ // NOTE: VerifyCertificate is ineffective unless ClientAuth is set to RequireAndVerifyClientCert. VerificationMode: tlscommon.VerifyCertificate, - // Unfortunately ServerConfig uses an unexported type in an exported field. - ClientAuth: 4, // tls.RequireAndVerifyClientCert + ClientAuth: &clientAuth, // tls.RequireAndVerifyClientCert CAs: []string{ string(certData.ca.CertPEM(t)), }, From 67abb7a2103c2d8d39625d825722f887655f36f1 Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Thu, 25 Jan 2024 17:09:16 +1030 Subject: [PATCH 073/129] x-pack/filebeat/input/httpjson: propagate request trace configuration to chain children (#37682) This ensures that all chain child clients will log request traces with the same details as the top level request trace logger. 
--- CHANGELOG.next.asciidoc | 1 + x-pack/filebeat/input/httpjson/input.go | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 0a34b39ec7b2..a3510db85377 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -168,6 +168,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - Add support for complete URL replacement in HTTPJSON chain steps. {pull}37486[37486] - Add support for user-defined query selection in EntraID entity analytics provider. {pull}37653[37653] - Update CEL extensions library to v1.8.0 to provide runtime error location reporting. {issue}37304[37304] {pull}37718[37718] +- Add request trace logging for chained API requests. {issue}37551[36551] {pull}37682[37682] *Auditbeat* diff --git a/x-pack/filebeat/input/httpjson/input.go b/x-pack/filebeat/input/httpjson/input.go index 928c056d2d39..17877b607013 100644 --- a/x-pack/filebeat/input/httpjson/input.go +++ b/x-pack/filebeat/input/httpjson/input.go @@ -122,6 +122,16 @@ func run(ctx v2.Context, cfg config, pub inputcursor.Publisher, crsr *inputcurso if cfg.Request.Tracer != nil { id := sanitizeFileName(ctx.ID) cfg.Request.Tracer.Filename = strings.ReplaceAll(cfg.Request.Tracer.Filename, "*", id) + + // Propagate tracer behaviour to all chain children. + for i, c := range cfg.Chain { + if c.Step != nil { // Request is validated as required. + cfg.Chain[i].Step.Request.Tracer = cfg.Request.Tracer + } + if c.While != nil { // Request is validated as required. 
+ cfg.Chain[i].While.Request.Tracer = cfg.Request.Tracer + } + } } metrics := newInputMetrics(reg) From b09ac16b7b4cb448c623a0a052a40db1a76b950e Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Thu, 25 Jan 2024 19:43:12 +1030 Subject: [PATCH 074/129] filebeat/input/{tcp,udp}: relax requirements that proc entries be present when an address is (#37714) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous logic required that if an address is present according to net.LookupIP, then it must be present in the /proc/net entries. This may not the case when a tcp/udp listener is created without specifying tcp4/udp4 for an IPv4 host address and there is an expectation of finding the socket in the /proc/net/{tcp,udp} table. So only complain if the entry has ever been found and never skip storing a metric even when there is a legitimate reason to expect its presence — because it has been seen in the past. This second part is an extension to reduce the loss of metric data, even if it is only partial. Also fix the base of the queue length parsers. This was incorrectly claimed to be decimal due to misreading the kernel source. --- CHANGELOG.next.asciidoc | 2 ++ filebeat/input/tcp/input.go | 39 ++++++++++++++++++++++++--------- filebeat/input/udp/input.go | 43 ++++++++++++++++++++++++++----------- 3 files changed, 62 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index a3510db85377..3fe46b65e0c2 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -74,6 +74,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix handling of Juniper SRX structured data when there is no leading junos element. 
{issue}36270[36270] {pull}36308[36308] - Fix Filebeat Cisco module with missing escape character {issue}36325[36325] {pull}36326[36326] - Added a fix for Crowdstrike pipeline handling process arrays {pull}36496[36496] +- Fix TCP/UDP metric queue length parsing base. {pull}37714[37714] *Heartbeat* @@ -169,6 +170,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - Add support for user-defined query selection in EntraID entity analytics provider. {pull}37653[37653] - Update CEL extensions library to v1.8.0 to provide runtime error location reporting. {issue}37304[37304] {pull}37718[37718] - Add request trace logging for chained API requests. {issue}37551[36551] {pull}37682[37682] +- Relax TCP/UDP metric polling expectations to improve metric collection. {pull}37714[37714] *Auditbeat* diff --git a/filebeat/input/tcp/input.go b/filebeat/input/tcp/input.go index 762c6b6ba355..1b3ffa7c2aa4 100644 --- a/filebeat/input/tcp/input.go +++ b/filebeat/input/tcp/input.go @@ -238,31 +238,50 @@ func (m *inputMetrics) poll(addr, addr6 []string, each time.Duration, log *logp. // base level for the rx_queue values and ensures that if the // constructed address values are malformed we panic early // within the period of system testing. 
+ want4 := true rx, err := procNetTCP("/proc/net/tcp", addr, hasUnspecified, addrIsUnspecified) if err != nil { - log.Warnf("failed to get initial tcp stats from /proc: %v", err) + want4 = false + log.Infof("did not get initial tcp stats from /proc: %v", err) } + want6 := true rx6, err := procNetTCP("/proc/net/tcp6", addr6, hasUnspecified6, addrIsUnspecified6) if err != nil { - log.Warnf("failed to get initial tcp6 stats from /proc: %v", err) + want6 = false + log.Infof("did not get initial tcp6 stats from /proc: %v", err) + } + if !want4 && !want6 { + log.Warnf("failed to get initial tcp or tcp6 stats from /proc: %v", err) + } else { + m.rxQueue.Set(uint64(rx + rx6)) } - m.rxQueue.Set(uint64(rx + rx6)) t := time.NewTicker(each) for { select { case <-t.C: + var found bool rx, err := procNetTCP("/proc/net/tcp", addr, hasUnspecified, addrIsUnspecified) if err != nil { - log.Warnf("failed to get tcp stats from /proc: %v", err) - continue + if want4 { + log.Warnf("failed to get tcp stats from /proc: %v", err) + } + } else { + found = true + want4 = true } rx6, err := procNetTCP("/proc/net/tcp6", addr6, hasUnspecified6, addrIsUnspecified6) if err != nil { - log.Warnf("failed to get tcp6 stats from /proc: %v", err) - continue + if want6 { + log.Warnf("failed to get tcp6 stats from /proc: %v", err) + } + } else { + found = true + want6 = true + } + if found { + m.rxQueue.Set(uint64(rx + rx6)) } - m.rxQueue.Set(uint64(rx + rx6)) case <-m.done: t.Stop() return @@ -323,10 +342,10 @@ func procNetTCP(path string, addr []string, hasUnspecified bool, addrIsUnspecifi } found = true - // queue lengths are decimal, e.g.: + // queue lengths are hex, e.g.: // - https://elixir.bootlin.com/linux/v6.2.11/source/net/ipv4/tcp_ipv4.c#L2643 // - https://elixir.bootlin.com/linux/v6.2.11/source/net/ipv6/tcp_ipv6.c#L1987 - v, err := strconv.ParseInt(string(r), 10, 64) + v, err := strconv.ParseInt(string(r), 16, 64) if err != nil { return 0, fmt.Errorf("failed to parse rx_queue: %w", err) } 
diff --git a/filebeat/input/udp/input.go b/filebeat/input/udp/input.go index 831fb41c2ee6..cd7ca0c56051 100644 --- a/filebeat/input/udp/input.go +++ b/filebeat/input/udp/input.go @@ -231,33 +231,52 @@ func (m *inputMetrics) poll(addr, addr6 []string, each time.Duration, log *logp. // base level for the rx_queue and drops values and ensures that // if the constructed address values are malformed we panic early // within the period of system testing. + want4 := true rx, drops, err := procNetUDP("/proc/net/udp", addr, hasUnspecified, addrIsUnspecified) if err != nil { - log.Warnf("failed to get initial udp stats from /proc: %v", err) + want4 = false + log.Infof("did not get initial udp stats from /proc: %v", err) } + want6 := true rx6, drops6, err := procNetUDP("/proc/net/udp6", addr6, hasUnspecified6, addrIsUnspecified6) if err != nil { - log.Warnf("failed to get initial udp6 stats from /proc: %v", err) + want6 = false + log.Infof("did not get initial udp6 stats from /proc: %v", err) + } + if !want4 && !want6 { + log.Warnf("failed to get initial udp or udp6 stats from /proc: %v", err) + } else { + m.rxQueue.Set(uint64(rx + rx6)) + m.drops.Set(uint64(drops + drops6)) } - m.rxQueue.Set(uint64(rx + rx6)) - m.drops.Set(uint64(drops + drops6)) t := time.NewTicker(each) for { select { case <-t.C: + var found bool rx, drops, err := procNetUDP("/proc/net/udp", addr, hasUnspecified, addrIsUnspecified) if err != nil { - log.Warnf("failed to get udp stats from /proc: %v", err) - continue + if want4 { + log.Warnf("failed to get udp stats from /proc: %v", err) + } + } else { + found = true + want4 = true } rx6, drops6, err := procNetUDP("/proc/net/udp6", addr6, hasUnspecified6, addrIsUnspecified6) if err != nil { - log.Warnf("failed to get udp6 stats from /proc: %v", err) - continue + if want6 { + log.Warnf("failed to get udp6 stats from /proc: %v", err) + } + } else { + found = true + want6 = true + } + if found { + m.rxQueue.Set(uint64(rx + rx6)) + m.drops.Set(uint64(drops + 
drops6)) } - m.rxQueue.Set(uint64(rx + rx6)) - m.drops.Set(uint64(drops + drops6)) case <-m.done: t.Stop() return @@ -321,10 +340,10 @@ func procNetUDP(path string, addr []string, hasUnspecified bool, addrIsUnspecifi } found = true - // queue lengths and drops are decimal, e.g.: + // queue lengths and drops are hex, e.g.: // - https://elixir.bootlin.com/linux/v6.2.11/source/net/ipv4/udp.c#L3110 // - https://elixir.bootlin.com/linux/v6.2.11/source/net/ipv6/datagram.c#L1048 - v, err := strconv.ParseInt(string(r), 10, 64) + v, err := strconv.ParseInt(string(r), 16, 64) if err != nil { return 0, 0, fmt.Errorf("failed to parse rx_queue: %w", err) } From 77abcf36934ae6f08f3d6c335657bbc8218dd642 Mon Sep 17 00:00:00 2001 From: Richa Talwar <102972658+ritalwar@users.noreply.github.com> Date: Thu, 25 Jan 2024 14:44:10 +0530 Subject: [PATCH 075/129] metricbeat/module/mongodb/replstatus: Update `getOpTimestamp` in `replstatus` to fix sort and temp files generation issue (#37688) * Update getOpTimestamp implementation --- CHANGELOG.next.asciidoc | 1 + metricbeat/module/mongodb/replstatus/info.go | 47 ++++++++++---------- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 3fe46b65e0c2..308f607a8eed 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -194,6 +194,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - Add a `/inputs/` route to the HTTP monitoring endpoint that exposes metrics for each metricset instance. {pull}36971[36971] - Add linux IO metrics to system/process {pull}37213[37213] - Add new memory/cgroup metrics to Kibana module {pull}37232[37232] +- Update `getOpTimestamp` in `replstatus` to fix sort and temp files generation issue in mongodb. 
{pull}37688[37688] *Osquerybeat* diff --git a/metricbeat/module/mongodb/replstatus/info.go b/metricbeat/module/mongodb/replstatus/info.go index 037aeda09502..a444fa03b1ff 100644 --- a/metricbeat/module/mongodb/replstatus/info.go +++ b/metricbeat/module/mongodb/replstatus/info.go @@ -21,11 +21,10 @@ import ( "context" "errors" "fmt" + "time" "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/mongo" - "go.mongodb.org/mongo-driver/mongo/options" ) type oplogInfo struct { @@ -71,14 +70,9 @@ func getReplicationInfo(client *mongo.Client) (*oplogInfo, error) { } // get first and last items in the oplog - firstTs, err := getOpTimestamp(collection, "$natural") + firstTs, lastTs, err := getOpTimestamp(collection) if err != nil { - return nil, fmt.Errorf("could not get first operation timestamp in op log: %w", err) - } - - lastTs, err := getOpTimestamp(collection, "-$natural") - if err != nil { - return nil, fmt.Errorf("could not get last operation timestamp in op log: %w", err) + return nil, fmt.Errorf("could not get operation timestamp in op log: %w", err) } diff := lastTs - firstTs @@ -92,28 +86,35 @@ func getReplicationInfo(client *mongo.Client) (*oplogInfo, error) { }, nil } -func getOpTimestamp(collection *mongo.Collection, sort string) (uint32, error) { - opt := options.Find().SetSort(bson.D{{Key: sort, Value: 1}}) - cursor, err := collection.Find(context.Background(), bson.D{}, opt) - if err != nil { - return 0, fmt.Errorf("could not get cursor on collection '%s': %w", collection.Name(), err) +func getOpTimestamp(collection *mongo.Collection) (uint32, uint32, error) { + + // Find both first and last timestamps using $min and $max + pipeline := bson.A{ + bson.M{"$group": bson.M{"_id": 1, "minTS": bson.M{"$min": "$ts"}, "maxTS": bson.M{"$max": "$ts"}}}, } - if !cursor.Next(context.Background()) { - return 0, errors.New("objects not found in local.oplog.rs") + cursor, err := 
collection.Aggregate(context.Background(), pipeline) + if err != nil { + return 0, 0, fmt.Errorf("could not get operation timestamps in op log: %w", err) } + defer cursor.Close(context.Background()) - var opTime map[string]interface{} - if err = cursor.Decode(&opTime); err != nil { - return 0, fmt.Errorf("error decoding response: %w", err) + var result struct { + MinTS time.Time `bson:"minTS"` + MaxTS time.Time `bson:"maxTS"` } - ts, ok := opTime["ts"].(primitive.Timestamp) - if !ok { - return 0, errors.New("an expected timestamp was not found") + if !cursor.Next(context.Background()) { + return 0, 0, errors.New("no documents found in op log") } + if err := cursor.Decode(&result); err != nil { + return 0, 0, fmt.Errorf("error decoding response for timestamps: %w", err) + } + + minTS := uint32(result.MinTS.Unix()) + maxTS := uint32(result.MaxTS.Unix()) - return ts.T, nil + return minTS, maxTS, nil } func contains(s []string, x string) bool { From aa72a3fa0d039d3a1fda709355db2e48a4f3975f Mon Sep 17 00:00:00 2001 From: Marc Guasch Date: Thu, 25 Jan 2024 13:29:40 +0100 Subject: [PATCH 076/129] [m365_defender] Fix log data stream cursor and query (#37116) * Fix m365_defender cursor value and query building. * Add PR number * Remove formatDate function * Fix changelog --------- Co-authored-by: Bharat Pasupula <123897612+bhapas@users.noreply.github.com> --- CHANGELOG.next.asciidoc | 1 + .../module/microsoft/m365_defender/config/defender.yml | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 308f607a8eed..8203c6d8f0bc 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -74,6 +74,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix handling of Juniper SRX structured data when there is no leading junos element. 
{issue}36270[36270] {pull}36308[36308] - Fix Filebeat Cisco module with missing escape character {issue}36325[36325] {pull}36326[36326] - Added a fix for Crowdstrike pipeline handling process arrays {pull}36496[36496] +- Fix m365_defender cursor value and query building. {pull}37116[37116] - Fix TCP/UDP metric queue length parsing base. {pull}37714[37714] *Heartbeat* diff --git a/x-pack/filebeat/module/microsoft/m365_defender/config/defender.yml b/x-pack/filebeat/module/microsoft/m365_defender/config/defender.yml index 6716568ba141..3d8747586153 100644 --- a/x-pack/filebeat/module/microsoft/m365_defender/config/defender.yml +++ b/x-pack/filebeat/module/microsoft/m365_defender/config/defender.yml @@ -19,9 +19,8 @@ request.transforms: value: "MdatpPartner-Elastic-Filebeat/1.0.0" - set: target: "url.params.$filter" - value: 'lastUpdateTime gt [[formatDate .cursor.lastUpdateTime "2006-01-02T15:04:05.9999999Z"]]' + value: 'lastUpdateTime gt [[.cursor.lastUpdateTime]]' default: 'lastUpdateTime gt [[formatDate (now (parseDuration "-55m")) "2006-01-02T15:04:05.9999999Z"]]' - response.split: target: body.value ignore_empty_value: true @@ -31,10 +30,10 @@ response.split: split: target: body.alerts.entities keep_parent: true - cursor: lastUpdateTime: - value: "[[.last_response.body.lastUpdateTime]]" + value: "[[.last_event.lastUpdateTime]]" + ignore_empty_value: true {{ else if eq .input "file" }} From 56e198bd4be0b0d782944c8ee865d55dcadeaed0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 Jan 2024 13:02:11 +0000 Subject: [PATCH 077/129] build(deps): bump github.com/elastic/go-libaudit/v2 from 2.4.0 to 2.5.0 (#37742) --- NOTICE.txt | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index ac9818f77d37..6770b3e0bffb 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -13648,11 +13648,11 @@ Contents of probable licence file 
$GOMODCACHE/github.com/elastic/go-elasticsearc -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-libaudit/v2 -Version: v2.4.0 +Version: v2.5.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-libaudit/v2@v2.4.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-libaudit/v2@v2.5.0/LICENSE.txt: Apache License diff --git a/go.mod b/go.mod index d003eeca9219..462f575b24fe 100644 --- a/go.mod +++ b/go.mod @@ -71,7 +71,7 @@ require ( github.com/eclipse/paho.mqtt.golang v1.3.5 github.com/elastic/elastic-agent-client/v7 v7.6.0 github.com/elastic/go-concert v0.2.0 - github.com/elastic/go-libaudit/v2 v2.4.0 + github.com/elastic/go-libaudit/v2 v2.5.0 github.com/elastic/go-licenser v0.4.1 github.com/elastic/go-lookslike v1.0.1 github.com/elastic/go-lumber v0.1.2-0.20220819171948-335fde24ea0f diff --git a/go.sum b/go.sum index 043ad9afca0a..fca63002617e 100644 --- a/go.sum +++ b/go.sum @@ -677,8 +677,8 @@ github.com/elastic/go-concert v0.2.0 h1:GAQrhRVXprnNjtvTP9pWJ1d4ToEA4cU5ci7TwTa2 github.com/elastic/go-concert v0.2.0/go.mod h1:HWjpO3IAEJUxOeaJOWXWEp7imKd27foxz9V5vegC/38= github.com/elastic/go-elasticsearch/v8 v8.11.1 h1:1VgTgUTbpqQZ4uE+cPjkOvy/8aw1ZvKcU0ZUE5Cn1mc= github.com/elastic/go-elasticsearch/v8 v8.11.1/go.mod h1:GU1BJHO7WeamP7UhuElYwzzHtvf9SDmeVpSSy9+o6Qg= -github.com/elastic/go-libaudit/v2 v2.4.0 h1:PqaGnB+dncrdUXqzQMyJu/dGysAtk6m5V3GIBMY473I= -github.com/elastic/go-libaudit/v2 v2.4.0/go.mod h1:AjlnhinP+kKQuUJoXLVrqxBM8uyhQmkzoV6jjsCFP4Q= +github.com/elastic/go-libaudit/v2 v2.5.0 h1:5OK919QRnGtcjVBz3n/cs5F42im1mPlVTA9TyIn2K54= +github.com/elastic/go-libaudit/v2 v2.5.0/go.mod h1:AjlnhinP+kKQuUJoXLVrqxBM8uyhQmkzoV6jjsCFP4Q= github.com/elastic/go-licenser v0.4.1 h1:1xDURsc8pL5zYT9R29425J3vkHdt4RT5TNEMeRN48x4= 
github.com/elastic/go-licenser v0.4.1/go.mod h1:V56wHMpmdURfibNBggaSBfqgPxyT1Tldns1i87iTEvU= github.com/elastic/go-lookslike v1.0.1 h1:qVieyn6i/kx4xntar1cEB0qrGHVGNCX5KC8czAaTW/0= From 685be2f7fd2b28436d2202e853b12f002da71660 Mon Sep 17 00:00:00 2001 From: Panos Koutsovasilis Date: Thu, 25 Jan 2024 16:47:18 +0200 Subject: [PATCH 078/129] move (re-license) tracing package and introduce 'allowundefined' in kprobe struct tag (#37602) * fix: replace deprecated io/ioutil with os * fix: rename local vars so they don't collide with built-in functions * feat: introduce support for allowundefined tag * fix: remove unnecessary named return variable * feat: expose the option to set the wakeup_events for the perf channel * feat: move tracing from x-pack/auditbeat to auditbeat * legal: re-license tracing from Elastic License to Apache License Version 2.0 * fix: remove deprecated ioutil in events_test.go * fix: replace naked return(s) * fix: pre-allocate slices wherever the len is known * fix: use errors.Is to check for a specific error * fix: remove unused withTime struct field from PerfChannel * fix: properly use make(chan struct{}) * fix: use raw string with regexp.MustCompile * fix: replace missed naked return(s) * fix: replace pre-allocating len of the slices with cap * feat: modernise tracing endian.go to use binary.NativeEndian * feat: refactor copyInt and readInt to use unsafe.Slice * fix: revert pollAll in perfevent.go to named returns as these can be properly documented * fix: remove redundant endian.go and utilise directly binary.NativeEndian * fix: return explicitly the named returns in pollAll * Revert "fix: remove redundant endian.go and utilise directly binary.NativeEndian" This reverts commit 19d9c28c6d394c74bac05904c9e7ab70f548ce08. 
--- .../auditbeat => auditbeat}/tracing/cpu.go | 40 +++++++--- .../tracing/cpu_test.go | 19 ++++- .../tracing/decoder.go | 39 +++++++--- auditbeat/tracing/doc.go | 22 ++++++ auditbeat/tracing/endian.go | 28 +++++++ .../tracing/events_test.go | 26 +++++-- auditbeat/tracing/int_aligned.go | 71 +++++++++++++++++ .../tracing/int_unaligned.go | 25 ++++-- .../tracing/perfevent.go | 76 +++++++++++++------ .../auditbeat => auditbeat}/tracing/probe.go | 19 ++++- .../tracing/tracefs.go | 32 +++++--- .../auditbeat/module/system/socket/events.go | 2 +- .../module/system/socket/guess/creds.go | 2 +- .../module/system/socket/guess/cskxmit6.go | 2 +- .../module/system/socket/guess/deref.go | 2 +- .../module/system/socket/guess/guess.go | 2 +- .../module/system/socket/guess/inetsock.go | 2 +- .../module/system/socket/guess/inetsock6.go | 2 +- .../module/system/socket/guess/inetsockaf.go | 2 +- .../module/system/socket/guess/iplocalout.go | 2 +- .../module/system/socket/guess/skbuff.go | 2 +- .../module/system/socket/guess/sockaddrin.go | 2 +- .../module/system/socket/guess/sockaddrin6.go | 2 +- .../module/system/socket/guess/socketsk.go | 2 +- .../module/system/socket/guess/syscallargs.go | 2 +- .../system/socket/guess/tcpsendmsgargs.go | 2 +- .../system/socket/guess/tcpsendmsgsk.go | 2 +- .../module/system/socket/guess/udpsendmsg.go | 2 +- .../module/system/socket/helper/probes.go | 2 +- .../module/system/socket/helper/types.go | 2 +- .../auditbeat/module/system/socket/kprobes.go | 2 +- .../module/system/socket/kprobes_test.go | 2 +- .../module/system/socket/socket_linux.go | 2 +- .../auditbeat/module/system/socket/state.go | 2 +- .../module/system/socket/state_test.go | 2 +- .../module/system/socket/template.go | 2 +- x-pack/auditbeat/tracing/doc.go | 9 --- x-pack/auditbeat/tracing/endian.go | 29 ------- x-pack/auditbeat/tracing/int_aligned.go | 57 -------------- 39 files changed, 350 insertions(+), 192 deletions(-) rename {x-pack/auditbeat => auditbeat}/tracing/cpu.go (73%) 
rename {x-pack/auditbeat => auditbeat}/tracing/cpu_test.go (76%) rename {x-pack/auditbeat => auditbeat}/tracing/decoder.go (90%) create mode 100644 auditbeat/tracing/doc.go create mode 100644 auditbeat/tracing/endian.go rename {x-pack/auditbeat => auditbeat}/tracing/events_test.go (90%) create mode 100644 auditbeat/tracing/int_aligned.go rename {x-pack/auditbeat => auditbeat}/tracing/int_unaligned.go (52%) rename {x-pack/auditbeat => auditbeat}/tracing/perfevent.go (88%) rename {x-pack/auditbeat => auditbeat}/tracing/probe.go (80%) rename {x-pack/auditbeat => auditbeat}/tracing/tracefs.go (89%) delete mode 100644 x-pack/auditbeat/tracing/doc.go delete mode 100644 x-pack/auditbeat/tracing/endian.go delete mode 100644 x-pack/auditbeat/tracing/int_aligned.go diff --git a/x-pack/auditbeat/tracing/cpu.go b/auditbeat/tracing/cpu.go similarity index 73% rename from x-pack/auditbeat/tracing/cpu.go rename to auditbeat/tracing/cpu.go index e0fd15e09ceb..280cc395bf10 100644 --- a/x-pack/auditbeat/tracing/cpu.go +++ b/auditbeat/tracing/cpu.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. //go:build linux @@ -9,7 +22,7 @@ package tracing import ( "bytes" "fmt" - "io/ioutil" + "os" "strconv" "strings" ) @@ -72,7 +85,7 @@ func (s CPUSet) AsList() []int { // NewCPUSetFromFile creates a new CPUSet from the contents of a file. func NewCPUSetFromFile(path string) (cpus CPUSet, err error) { - contents, err := ioutil.ReadFile(path) + contents, err := os.ReadFile(path) if err != nil { return cpus, err } @@ -84,9 +97,12 @@ func NewCPUSetFromFile(path string) (cpus CPUSet, err error) { // Where: // RANGE := | - func NewCPUSetFromExpression(contents string) (CPUSet, error) { - var ranges [][]int - var max, count int - for _, expr := range strings.Split(contents, ",") { + expressions := strings.Split(contents, ",") + + ranges := make([][]int, 0, len(expressions)) + + var maximum, count int + for _, expr := range expressions { if len(expr) == 0 { continue } @@ -99,16 +115,16 @@ func NewCPUSetFromExpression(contents string) (CPUSet, error) { } num := int(num16) r = append(r, num) - if num+1 > max { - max = num + 1 + if num+1 > maximum { + maximum = num + 1 } } ranges = append(ranges, r) } - if max == 0 { + if maximum == 0 { return CPUSet{}, nil } - mask := make([]bool, max) + mask := make([]bool, maximum) for _, r := range ranges { from, to := -1, -1 switch len(r) { diff --git a/x-pack/auditbeat/tracing/cpu_test.go b/auditbeat/tracing/cpu_test.go similarity index 76% rename from x-pack/auditbeat/tracing/cpu_test.go rename to auditbeat/tracing/cpu_test.go index 3f6921895daf..bfce3a72de0d 100644 --- a/x-pack/auditbeat/tracing/cpu_test.go +++ b/auditbeat/tracing/cpu_test.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. //go:build linux diff --git a/x-pack/auditbeat/tracing/decoder.go b/auditbeat/tracing/decoder.go similarity index 90% rename from x-pack/auditbeat/tracing/decoder.go rename to auditbeat/tracing/decoder.go index 8755b25f5dd9..d669e8c8e982 100644 --- a/x-pack/auditbeat/tracing/decoder.go +++ b/auditbeat/tracing/decoder.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. //go:build linux @@ -183,9 +196,13 @@ func NewStructDecoder(desc ProbeFormat, allocFn AllocateFn) (Decoder, error) { } var name string + var allowUndefined bool var greedy bool for idx, param := range strings.Split(values, ",") { switch param { + case "allowundefined": + // it is okay not to find it in the desc.Fields + allowUndefined = true case "greedy": greedy = true default: @@ -214,6 +231,9 @@ func NewStructDecoder(desc ProbeFormat, allocFn AllocateFn) (Decoder, error) { inField, found := desc.Fields[name] if !found { + if allowUndefined { + continue + } return nil, fmt.Errorf("field '%s' not found in kprobe format description", name) } @@ -326,14 +346,14 @@ func (d *structDecoder) Decode(raw []byte, meta Metadata) (s interface{}, err er case FieldTypeString: offset := uintptr(MachineEndian.Uint16(raw[dec.src:])) - len := uintptr(MachineEndian.Uint16(raw[dec.src+2:])) - if offset+len > n { + length := uintptr(MachineEndian.Uint16(raw[dec.src+2:])) + if offset+length > n { return nil, fmt.Errorf("perf event string data for field %s overflows message of size %d", dec.name, n) } - if len > 0 && raw[offset+len-1] == 0 { - len-- + if length > 0 && raw[offset+length-1] == 0 { + length-- } - *(*string)(unsafe.Add(destPtr, dec.dst)) = string(raw[offset : offset+len]) + *(*string)(unsafe.Add(destPtr, dec.dst)) = string(raw[offset : offset+length]) case FieldTypeMeta: *(*Metadata)(unsafe.Add(destPtr, dec.dst)) = meta @@ -357,7 +377,8 @@ type dumpDecoder struct { // - integer of 64bit (u64 / s64). // - dump consecutive memory. 
func NewDumpDecoder(format ProbeFormat) (Decoder, error) { - var fields []Field + fields := make([]Field, 0, len(format.Fields)) + for name, field := range format.Fields { if strings.Index(name, "arg") != 0 { continue diff --git a/auditbeat/tracing/doc.go b/auditbeat/tracing/doc.go new file mode 100644 index 000000000000..5f4e8b92331e --- /dev/null +++ b/auditbeat/tracing/doc.go @@ -0,0 +1,22 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package tracing provides a set of tools built on top of +// golang.org/x/sys/unix/linux/perf that simplify working with KProbes and +// UProbes, using tracing perf channels to receive events from the kernel and +// decoding of this raw events into more useful types. +package tracing diff --git a/auditbeat/tracing/endian.go b/auditbeat/tracing/endian.go new file mode 100644 index 000000000000..d7fa00c6fa20 --- /dev/null +++ b/auditbeat/tracing/endian.go @@ -0,0 +1,28 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux + +package tracing + +import ( + "encoding/binary" +) + +// MachineEndian is either binary.BigEndian or binary.LittleEndian, depending +// on the current architecture. +var MachineEndian = binary.NativeEndian diff --git a/x-pack/auditbeat/tracing/events_test.go b/auditbeat/tracing/events_test.go similarity index 90% rename from x-pack/auditbeat/tracing/events_test.go rename to auditbeat/tracing/events_test.go index d89f4946ca19..0b5efaec53a5 100644 --- a/x-pack/auditbeat/tracing/events_test.go +++ b/auditbeat/tracing/events_test.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. //go:build linux @@ -9,7 +22,6 @@ package tracing import ( "fmt" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -301,7 +313,7 @@ func TestKProbeReal(t *testing.T) { func TestKProbeEventsList(t *testing.T) { // Make dir to monitor. - tmpDir, err := ioutil.TempDir("", "events_test") + tmpDir, err := os.MkdirTemp("", "events_test") if err != nil { t.Fatal(err) } @@ -358,7 +370,7 @@ w:future feature func TestKProbeEventsAddRemoveKProbe(t *testing.T) { // Make dir to monitor. - tmpDir, err := ioutil.TempDir("", "events_test") + tmpDir, err := os.MkdirTemp("", "events_test") if err != nil { t.Fatal(err) } @@ -397,7 +409,7 @@ w:future feature off, err := file.Seek(int64(0), io.SeekStart) assert.NoError(t, err) assert.Equal(t, int64(0), off) - contents, err := ioutil.ReadAll(file) + contents, err := io.ReadAll(file) assert.NoError(t, err) expected := append([]byte(baseContents), []byte( `p:kprobe/myprobe sys_open path=+0(%di):string mode=%si diff --git a/auditbeat/tracing/int_aligned.go b/auditbeat/tracing/int_aligned.go new file mode 100644 index 000000000000..cbcadf96f324 --- /dev/null +++ b/auditbeat/tracing/int_aligned.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux && !386 && !amd64 && !amd64p32 + +// Alignment-safe integer reading and writing functions. + +package tracing + +import ( + "errors" + "unsafe" +) + +var errBadSize = errors.New("bad size for integer") + +func copyInt(dst unsafe.Pointer, src unsafe.Pointer, len uint8) error { + copy(unsafe.Slice((*byte)(dst), len), unsafe.Slice((*byte)(src), len)) + return nil +} + +func readInt(ptr unsafe.Pointer, len uint8, signed bool) (any, error) { + var value any + asSlice := unsafe.Slice((*byte)(ptr), len) + switch len { + case 1: + if signed { + value = int8(asSlice[0]) + } else { + value = asSlice[0] + } + case 2: + if signed { + value = int16(MachineEndian.Uint16(asSlice)) + } else { + value = MachineEndian.Uint16(asSlice) + } + + case 4: + if signed { + value = int32(MachineEndian.Uint32(asSlice)) + } else { + value = MachineEndian.Uint32(asSlice) + } + + case 8: + if signed { + value = int64(MachineEndian.Uint64(asSlice)) + } else { + value = MachineEndian.Uint64(asSlice) + } + + default: + return nil, errBadSize + } + return value, nil +} diff --git a/x-pack/auditbeat/tracing/int_unaligned.go b/auditbeat/tracing/int_unaligned.go similarity index 52% rename from x-pack/auditbeat/tracing/int_unaligned.go rename to auditbeat/tracing/int_unaligned.go index 38a767dd6421..d4c1a3f6b167 100644 --- a/x-pack/auditbeat/tracing/int_unaligned.go +++ b/auditbeat/tracing/int_unaligned.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. 
Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. //go:build linux && (386 || amd64 || amd64p32) @@ -35,7 +48,9 @@ func copyInt(dst unsafe.Pointer, src unsafe.Pointer, len uint8) error { return nil } -func readInt(ptr unsafe.Pointer, len uint8, signed bool) (value interface{}, err error) { +func readInt(ptr unsafe.Pointer, len uint8, signed bool) (any, error) { + var value any + switch len { case 1: if signed { @@ -67,5 +82,5 @@ func readInt(ptr unsafe.Pointer, len uint8, signed bool) (value interface{}, err default: return nil, errBadSize } - return + return value, nil } diff --git a/x-pack/auditbeat/tracing/perfevent.go b/auditbeat/tracing/perfevent.go similarity index 88% rename from x-pack/auditbeat/tracing/perfevent.go rename to auditbeat/tracing/perfevent.go index 4b97772b18fc..36f595aa6761 100644 --- a/x-pack/auditbeat/tracing/perfevent.go +++ b/auditbeat/tracing/perfevent.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. 
Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. //go:build linux @@ -57,14 +70,14 @@ type PerfChannel struct { cpus CPUSet // Settings - attr perf.Attr - mappedPages int - pid int - pollTimeout time.Duration - sizeSampleC int - sizeErrC int - sizeLostC int - withTime bool + attr perf.Attr + mappedPages int + pid int + pollTimeout time.Duration + sizeSampleC int + sizeErrC int + sizeLostC int + wakeUpEvents uint32 } // PerfChannelConf instances change the configuration of a perf channel. 
@@ -89,14 +102,15 @@ func NewPerfChannel(cfg ...PerfChannelConf) (channel *PerfChannel, err error) { // Defaults channel = &PerfChannel{ - sizeSampleC: 1024, - sizeErrC: 8, - sizeLostC: 64, - mappedPages: 64, - pollTimeout: time.Millisecond * 200, - done: make(chan struct{}, 0), - streams: make(map[uint64]stream), - pid: perf.AllThreads, + sizeSampleC: 1024, + sizeErrC: 8, + sizeLostC: 64, + mappedPages: 64, + wakeUpEvents: 1, + pollTimeout: time.Millisecond * 200, + done: make(chan struct{}), + streams: make(map[uint64]stream), + pid: perf.AllThreads, attr: perf.Attr{ Type: perf.TracepointEvent, ClockID: unix.CLOCK_MONOTONIC, @@ -108,8 +122,6 @@ func NewPerfChannel(cfg ...PerfChannelConf) (channel *PerfChannel, err error) { }, }, } - channel.attr.SetSamplePeriod(1) - channel.attr.SetWakeupEvents(1) // Load the list of online CPUs from /sys/devices/system/cpu/online. // This is necessary in order to to install each kprobe on all online CPUs. @@ -130,6 +142,10 @@ func NewPerfChannel(cfg ...PerfChannelConf) (channel *PerfChannel, err error) { return nil, err } } + + channel.attr.SetSamplePeriod(1) + channel.attr.SetWakeupEvents(channel.wakeUpEvents) + return channel, nil } @@ -157,6 +173,18 @@ func WithErrBufferSize(size int) PerfChannelConf { } } +// WithWakeUpEvents configures sets how many samples happen before an overflow +// notification happens. Setting wakeUpEvents to 0 is equivalent to 1. +func WithWakeUpEvents(wakeUpEvents uint32) PerfChannelConf { + return func(channel *PerfChannel) error { + if wakeUpEvents == 0 { + wakeUpEvents = 1 + } + channel.wakeUpEvents = wakeUpEvents + return nil + } +} + // WithLostBufferSize configures the capacity of the channel used to pass lost // event notifications (PerfChannel.LostC()). 
func WithLostBufferSize(size int) PerfChannelConf { @@ -462,7 +490,7 @@ func (m *recordMerger) readSampleNonBlock(ev *perf.Event, ctx context.Context) ( return nil, false } if err != nil { - if err == perf.ErrBadRecord { + if errors.Is(err, perf.ErrBadRecord) { m.channel.lostC <- ^uint64(0) continue } @@ -503,7 +531,7 @@ func pollAll(evs []*perf.Event, timeout time.Duration) (active int, closed int, } ts := unix.NsecToTimespec(timeout.Nanoseconds()) - for err = unix.EINTR; err == unix.EINTR; { + for err = unix.EINTR; errors.Is(err, unix.EINTR); { _, err = unix.Ppoll(pollfds, &ts, nil) } if err != nil { @@ -518,5 +546,5 @@ func pollAll(evs []*perf.Event, timeout time.Duration) (active int, closed int, closed++ } } - return + return active, closed, err } diff --git a/x-pack/auditbeat/tracing/probe.go b/auditbeat/tracing/probe.go similarity index 80% rename from x-pack/auditbeat/tracing/probe.go rename to auditbeat/tracing/probe.go index 61bf353ef5f5..5bfd5977c075 100644 --- a/x-pack/auditbeat/tracing/probe.go +++ b/auditbeat/tracing/probe.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. //go:build linux diff --git a/x-pack/auditbeat/tracing/tracefs.go b/auditbeat/tracing/tracefs.go similarity index 89% rename from x-pack/auditbeat/tracing/tracefs.go rename to auditbeat/tracing/tracefs.go index b26eb17312c3..532eb75ca459 100644 --- a/x-pack/auditbeat/tracing/tracefs.go +++ b/auditbeat/tracing/tracefs.go @@ -1,6 +1,19 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
//go:build linux @@ -26,9 +39,9 @@ const ( var ( // p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe // r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe - kprobeRegexp *regexp.Regexp = regexp.MustCompile("^([pr])[0-9]*:(?:([^/ ]*)/)?([^/ ]+) ([^ ]+) ?(.*)") + kprobeRegexp *regexp.Regexp = regexp.MustCompile(`^([pr])[0-9]*:(?:([^/ ]*)/)?([^/ ]+) ([^ ]+) ?(.*)`) - formatRegexp *regexp.Regexp = regexp.MustCompile("\\s+([^:]+):([^;]*);") + formatRegexp *regexp.Regexp = regexp.MustCompile(`\s+([^:]+):([^;]*);`) ) // TraceFS is an accessor to manage event tracing via tracefs or debugfs. @@ -72,13 +85,14 @@ func IsTraceFSAvailableAt(path string) error { // IsTraceFSAvailable returns nil if a tracefs or debugfs supporting KProbes // is available at the well-known paths. Otherwise returns an error. -func IsTraceFSAvailable() (err error) { +func IsTraceFSAvailable() error { + var err error for _, path := range []string{traceFSPath, debugFSTracingPath} { if err = IsTraceFSAvailableAt(path); err == nil { - break + return nil } } - return + return err } // ListKProbes lists the currently installed kprobes / kretprobes @@ -122,7 +136,7 @@ func (dfs *TraceFS) listProbes(filename string) (probes []Probe, err error) { } // AddKProbe installs a new kprobe/kretprobe. 
-func (dfs *TraceFS) AddKProbe(probe Probe) (err error) { +func (dfs *TraceFS) AddKProbe(probe Probe) error { return dfs.appendToFile(kprobeCfgFile, probe.String()) } diff --git a/x-pack/auditbeat/module/system/socket/events.go b/x-pack/auditbeat/module/system/socket/events.go index ad652b9aac57..beb0a988a7c9 100644 --- a/x-pack/auditbeat/module/system/socket/events.go +++ b/x-pack/auditbeat/module/system/socket/events.go @@ -18,7 +18,7 @@ import ( "golang.org/x/sys/unix" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" + "github.com/elastic/beats/v7/auditbeat/tracing" ) const ( diff --git a/x-pack/auditbeat/module/system/socket/guess/creds.go b/x-pack/auditbeat/module/system/socket/guess/creds.go index 7df1b0c1c2f5..8c808dcdbe53 100644 --- a/x-pack/auditbeat/module/system/socket/guess/creds.go +++ b/x-pack/auditbeat/module/system/socket/guess/creds.go @@ -14,8 +14,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/cskxmit6.go b/x-pack/auditbeat/module/system/socket/guess/cskxmit6.go index d77dc7a2bbe0..258d9f21a4f9 100644 --- a/x-pack/auditbeat/module/system/socket/guess/cskxmit6.go +++ b/x-pack/auditbeat/module/system/socket/guess/cskxmit6.go @@ -13,8 +13,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/deref.go b/x-pack/auditbeat/module/system/socket/guess/deref.go index 7996a8cd8b36..e2c3c0082c54 100644 --- a/x-pack/auditbeat/module/system/socket/guess/deref.go +++ 
b/x-pack/auditbeat/module/system/socket/guess/deref.go @@ -13,8 +13,8 @@ import ( "strconv" "syscall" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/guess.go b/x-pack/auditbeat/module/system/socket/guess/guess.go index 05c2aa4668a1..718afa0ad7b5 100644 --- a/x-pack/auditbeat/module/system/socket/guess/guess.go +++ b/x-pack/auditbeat/module/system/socket/guess/guess.go @@ -12,8 +12,8 @@ import ( "fmt" "time" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/inetsock.go b/x-pack/auditbeat/module/system/socket/guess/inetsock.go index 707db38b7e74..f9d1db85639f 100644 --- a/x-pack/auditbeat/module/system/socket/guess/inetsock.go +++ b/x-pack/auditbeat/module/system/socket/guess/inetsock.go @@ -14,8 +14,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/inetsock6.go b/x-pack/auditbeat/module/system/socket/guess/inetsock6.go index 4a937a554854..438c09d65c3f 100644 --- a/x-pack/auditbeat/module/system/socket/guess/inetsock6.go +++ b/x-pack/auditbeat/module/system/socket/guess/inetsock6.go @@ -14,8 +14,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" 
"github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/inetsockaf.go b/x-pack/auditbeat/module/system/socket/guess/inetsockaf.go index 60fbfed71055..69676b41a2d9 100644 --- a/x-pack/auditbeat/module/system/socket/guess/inetsockaf.go +++ b/x-pack/auditbeat/module/system/socket/guess/inetsockaf.go @@ -12,8 +12,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/iplocalout.go b/x-pack/auditbeat/module/system/socket/guess/iplocalout.go index 6a997af23ae2..26a95405e8ec 100644 --- a/x-pack/auditbeat/module/system/socket/guess/iplocalout.go +++ b/x-pack/auditbeat/module/system/socket/guess/iplocalout.go @@ -13,8 +13,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/skbuff.go b/x-pack/auditbeat/module/system/socket/guess/skbuff.go index 85589f8a4fe3..ba53089aed38 100644 --- a/x-pack/auditbeat/module/system/socket/guess/skbuff.go +++ b/x-pack/auditbeat/module/system/socket/guess/skbuff.go @@ -17,8 +17,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/sockaddrin.go b/x-pack/auditbeat/module/system/socket/guess/sockaddrin.go index 2a76d564ba5b..bfaebf544af4 100644 --- 
a/x-pack/auditbeat/module/system/socket/guess/sockaddrin.go +++ b/x-pack/auditbeat/module/system/socket/guess/sockaddrin.go @@ -14,8 +14,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/sockaddrin6.go b/x-pack/auditbeat/module/system/socket/guess/sockaddrin6.go index 5564015530b7..f9f7c1874215 100644 --- a/x-pack/auditbeat/module/system/socket/guess/sockaddrin6.go +++ b/x-pack/auditbeat/module/system/socket/guess/sockaddrin6.go @@ -13,8 +13,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/socketsk.go b/x-pack/auditbeat/module/system/socket/guess/socketsk.go index 5ebc0ab7de65..3c12cd294630 100644 --- a/x-pack/auditbeat/module/system/socket/guess/socketsk.go +++ b/x-pack/auditbeat/module/system/socket/guess/socketsk.go @@ -12,8 +12,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/syscallargs.go b/x-pack/auditbeat/module/system/socket/guess/syscallargs.go index 3930e7134b9c..902940985b93 100644 --- a/x-pack/auditbeat/module/system/socket/guess/syscallargs.go +++ b/x-pack/auditbeat/module/system/socket/guess/syscallargs.go @@ -12,8 +12,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" 
"github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgargs.go b/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgargs.go index faa3910ba5f6..058736eec56f 100644 --- a/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgargs.go +++ b/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgargs.go @@ -10,8 +10,8 @@ package guess import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgsk.go b/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgsk.go index 450a336df6e1..73f810e74146 100644 --- a/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgsk.go +++ b/x-pack/auditbeat/module/system/socket/guess/tcpsendmsgsk.go @@ -12,8 +12,8 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/guess/udpsendmsg.go b/x-pack/auditbeat/module/system/socket/guess/udpsendmsg.go index 5ab70f92a480..09241e6641ea 100644 --- a/x-pack/auditbeat/module/system/socket/guess/udpsendmsg.go +++ b/x-pack/auditbeat/module/system/socket/guess/udpsendmsg.go @@ -10,8 +10,8 @@ package guess import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git 
a/x-pack/auditbeat/module/system/socket/helper/probes.go b/x-pack/auditbeat/module/system/socket/helper/probes.go index 24ad0eda3d9a..3ebb3e2cfcbc 100644 --- a/x-pack/auditbeat/module/system/socket/helper/probes.go +++ b/x-pack/auditbeat/module/system/socket/helper/probes.go @@ -12,7 +12,7 @@ import ( "strings" "text/template" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/helper/types.go b/x-pack/auditbeat/module/system/socket/helper/types.go index 1365aeaf9e0f..d466e847e751 100644 --- a/x-pack/auditbeat/module/system/socket/helper/types.go +++ b/x-pack/auditbeat/module/system/socket/helper/types.go @@ -7,7 +7,7 @@ package helper import ( - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" + "github.com/elastic/beats/v7/auditbeat/tracing" ) // Logger exposes logging functions. diff --git a/x-pack/auditbeat/module/system/socket/kprobes.go b/x-pack/auditbeat/module/system/socket/kprobes.go index 3660f6a5a1dc..a87813459910 100644 --- a/x-pack/auditbeat/module/system/socket/kprobes.go +++ b/x-pack/auditbeat/module/system/socket/kprobes.go @@ -14,8 +14,8 @@ import ( "github.com/joeshaw/multierror" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/module/system/socket/kprobes_test.go b/x-pack/auditbeat/module/system/socket/kprobes_test.go index fdaeac8f8bc6..8ddca79e957a 100644 --- a/x-pack/auditbeat/module/system/socket/kprobes_test.go +++ b/x-pack/auditbeat/module/system/socket/kprobes_test.go @@ -11,9 +11,9 @@ import ( "strings" "testing" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/guess" 
"github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" ) func probeName(p tracing.Probe) string { diff --git a/x-pack/auditbeat/module/system/socket/socket_linux.go b/x-pack/auditbeat/module/system/socket/socket_linux.go index c7b7a9794538..b334b8488921 100644 --- a/x-pack/auditbeat/module/system/socket/socket_linux.go +++ b/x-pack/auditbeat/module/system/socket/socket_linux.go @@ -23,13 +23,13 @@ import ( "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/guess" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/go-perf" diff --git a/x-pack/auditbeat/module/system/socket/state.go b/x-pack/auditbeat/module/system/socket/state.go index a302bba0caa5..19bb729a8442 100644 --- a/x-pack/auditbeat/module/system/socket/state.go +++ b/x-pack/auditbeat/module/system/socket/state.go @@ -20,12 +20,12 @@ import ( "github.com/joeshaw/multierror" "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/flowhash" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/dns" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/go-libaudit/v2/aucoalesce" ) diff --git a/x-pack/auditbeat/module/system/socket/state_test.go 
b/x-pack/auditbeat/module/system/socket/state_test.go index 611581c5d30c..fd3e125cc408 100644 --- a/x-pack/auditbeat/module/system/socket/state_test.go +++ b/x-pack/auditbeat/module/system/socket/state_test.go @@ -18,10 +18,10 @@ import ( "github.com/stretchr/testify/assert" "golang.org/x/sys/unix" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/dns" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" ) type logWrapper testing.T diff --git a/x-pack/auditbeat/module/system/socket/template.go b/x-pack/auditbeat/module/system/socket/template.go index 84f890e5be19..c1a97a163b9d 100644 --- a/x-pack/auditbeat/module/system/socket/template.go +++ b/x-pack/auditbeat/module/system/socket/template.go @@ -10,8 +10,8 @@ import ( "strings" "unsafe" + "github.com/elastic/beats/v7/auditbeat/tracing" "github.com/elastic/beats/v7/libbeat/common" - "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/auditbeat/tracing/doc.go b/x-pack/auditbeat/tracing/doc.go deleted file mode 100644 index 0d716eaf7c97..000000000000 --- a/x-pack/auditbeat/tracing/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -// Package tracing provides a set of tools built on top of -// golang.org/x/sys/unix/linux/perf that simplify working with KProbes and -// UProbes, using tracing perf channels to receive events from the kernel and -// decoding of this raw events into more useful types. 
-package tracing diff --git a/x-pack/auditbeat/tracing/endian.go b/x-pack/auditbeat/tracing/endian.go deleted file mode 100644 index acb18aa9afa2..000000000000 --- a/x-pack/auditbeat/tracing/endian.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//go:build linux - -package tracing - -import ( - "encoding/binary" - "unsafe" -) - -// MachineEndian is either binary.BigEndian or binary.LittleEndian, depending -// on the current architecture. -var MachineEndian = getCPUEndianness() - -func getCPUEndianness() binary.ByteOrder { - myInt32 := new(uint32) - copy((*[4]byte)(unsafe.Pointer(myInt32))[:], []byte{0x12, 0x34, 0x56, 0x78}) - switch *myInt32 { - case 0x12345678: - return binary.BigEndian - case 0x78563412: - return binary.LittleEndian - default: - panic("cannot determine endianness") - } -} diff --git a/x-pack/auditbeat/tracing/int_aligned.go b/x-pack/auditbeat/tracing/int_aligned.go deleted file mode 100644 index 6c8c4c539725..000000000000 --- a/x-pack/auditbeat/tracing/int_aligned.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//go:build linux && !386 && !amd64 && !amd64p32 - -// Alignment-safe integer reading and writing functions. 
- -package tracing - -import ( - "errors" - "unsafe" -) - -var errBadSize = errors.New("bad size for integer") - -func copyInt(dst unsafe.Pointer, src unsafe.Pointer, len uint8) error { - copy((*(*[maxIntSizeBytes]byte)(dst))[:len], (*(*[maxIntSizeBytes]byte)(src))[:len]) - return nil -} - -func readInt(ptr unsafe.Pointer, len uint8, signed bool) (value interface{}, err error) { - asSlice := (*(*[maxIntSizeBytes]byte)(ptr))[:] - switch len { - case 1: - if signed { - value = int8(asSlice[0]) - } else { - value = uint8(asSlice[0]) - } - case 2: - if signed { - value = int16(MachineEndian.Uint16(asSlice)) - } else { - value = MachineEndian.Uint16(asSlice) - } - - case 4: - if signed { - value = int32(MachineEndian.Uint32(asSlice)) - } else { - value = MachineEndian.Uint32(asSlice) - } - - case 8: - if signed { - value = int64(MachineEndian.Uint64(asSlice)) - } else { - value = MachineEndian.Uint64(asSlice) - } - - default: - return nil, errBadSize - } - return -} From 6c8ca37ee1334226bd7c1768bc87009cfb361237 Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Thu, 25 Jan 2024 08:28:33 -0700 Subject: [PATCH 079/129] add terraform variables (#37713) --- .../_meta/terraform/variables.tf | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/x-pack/filebeat/input/awscloudwatch/_meta/terraform/variables.tf b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/variables.tf index 2c4fb00786bc..78b0a4741477 100644 --- a/x-pack/filebeat/input/awscloudwatch/_meta/terraform/variables.tf +++ b/x-pack/filebeat/input/awscloudwatch/_meta/terraform/variables.tf @@ -3,3 +3,26 @@ variable "aws_region" { type = string default = "us-east-1" } + +variable "BRANCH" { + description = "Branch name or pull request for tagging purposes" + default = "unknown-branch" +} + +variable "BUILD_ID" { + description = "Build ID in the CI for tagging purposes" + default = "unknown-build" +} + +variable "CREATED_DATE" { + description = "Creation date in epoch time for tagging purposes" + 
default = "unknown-date" +} + +variable "ENVIRONMENT" { + default = "unknown-environment" +} + +variable "REPO" { + default = "unknown-repo-name" +} From d6bed8d82d1a1a594679311ee83bbc9746cbbcf8 Mon Sep 17 00:00:00 2001 From: Andrew Kroh Date: Thu, 25 Jan 2024 10:58:38 -0500 Subject: [PATCH 080/129] packetbeat - bump npcap to 1.79 (#37733) --- CHANGELOG.next.asciidoc | 1 + x-pack/packetbeat/magefile.go | 2 +- x-pack/packetbeat/npcap/installer/LICENSE | 2 +- x-pack/packetbeat/tests/system/app_test.go | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 8203c6d8f0bc..300599547614 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -202,6 +202,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d *Packetbeat* +- Bump Windows Npcap version to v1.79. {pull}37733[37733] *Packetbeat* diff --git a/x-pack/packetbeat/magefile.go b/x-pack/packetbeat/magefile.go index c5df7ef2deb2..acef21538d62 100644 --- a/x-pack/packetbeat/magefile.go +++ b/x-pack/packetbeat/magefile.go @@ -36,7 +36,7 @@ import ( // the packetbeat executable. It is used to specify which npcap builder crossbuild // image to use and the installer to obtain from the cloud store for testing. 
const ( - NpcapVersion = "1.78" + NpcapVersion = "1.79" installer = "npcap-" + NpcapVersion + "-oem.exe" ) diff --git a/x-pack/packetbeat/npcap/installer/LICENSE b/x-pack/packetbeat/npcap/installer/LICENSE index 1073eb3ff69f..d7823e2e47ad 100644 --- a/x-pack/packetbeat/npcap/installer/LICENSE +++ b/x-pack/packetbeat/npcap/installer/LICENSE @@ -1,6 +1,6 @@ -------------------------------------------------------------------------------- Dependency : Npcap (https://nmap.org/npcap/) -Version: 1.78 +Version: 1.79 Licence type: Commercial -------------------------------------------------------------------------------- diff --git a/x-pack/packetbeat/tests/system/app_test.go b/x-pack/packetbeat/tests/system/app_test.go index fa1a359be70a..0f3668820837 100644 --- a/x-pack/packetbeat/tests/system/app_test.go +++ b/x-pack/packetbeat/tests/system/app_test.go @@ -29,7 +29,7 @@ import ( ) // Keep in sync with NpcapVersion in magefile.go. -const NpcapVersion = "1.78" +const NpcapVersion = "1.79" func TestWindowsNpcapInstaller(t *testing.T) { if runtime.GOOS != "windows" { From db4c44a1ef7b9e1aada8acca02d3da19c3bcc4d2 Mon Sep 17 00:00:00 2001 From: Florian Lehner Date: Thu, 25 Jan 2024 18:13:57 +0100 Subject: [PATCH 081/129] filebeat: minor cleanup and performance improvement (#37644) * filebeat/generator/fields: replace for loop with single instruction Improve performance by replacing a loop with a single instruction. Signed-off-by: Florian Lehner * filebeat/fileset/flags: remove duplicate import Simplify code by replacing a duplicate import. Signed-off-by: Florian Lehner * filebeat/generator/fields: precompile regex Precompile the fixed regex once instead for each iteration. 
Signed-off-by: Florian Lehner --------- Signed-off-by: Florian Lehner Co-authored-by: Denis --- filebeat/fileset/flags.go | 7 +++---- filebeat/generator/fields/fields.go | 13 ++++--------- 2 files changed, 7 insertions(+), 13 deletions(-) diff --git a/filebeat/fileset/flags.go b/filebeat/fileset/flags.go index a8ef562d757d..674c6fe7fd9f 100644 --- a/filebeat/fileset/flags.go +++ b/filebeat/fileset/flags.go @@ -23,7 +23,6 @@ import ( "strings" "github.com/elastic/elastic-agent-libs/config" - conf "github.com/elastic/elastic-agent-libs/config" ) // Modules related command line flags. @@ -32,11 +31,11 @@ var ( moduleOverrides = config.SettingFlag(nil, "M", "Module configuration overwrite") ) -type ModuleOverrides map[string]map[string]*conf.C // module -> fileset -> Config +type ModuleOverrides map[string]map[string]*config.C // module -> fileset -> Config // Get returns an array of configuration overrides that should be merged in order. -func (mo *ModuleOverrides) Get(module, fileset string) []*conf.C { - ret := []*conf.C{} +func (mo *ModuleOverrides) Get(module, fileset string) []*config.C { + ret := []*config.C{} moduleWildcard := (*mo)["*"]["*"] if moduleWildcard != nil { diff --git a/filebeat/generator/fields/fields.go b/filebeat/generator/fields/fields.go index 4727990b2332..ba3216c04f36 100644 --- a/filebeat/generator/fields/fields.go +++ b/filebeat/generator/fields/fields.go @@ -179,15 +179,12 @@ func addNewField(fs []field, f field) []field { return append(fs, f) } -func getSemanticElementsFromPatterns(patterns []string) ([]field, error) { - r, err := regexp.Compile("{[\\.\\w\\:]*}") - if err != nil { - return nil, err - } +var semanticElementsRegex = regexp.MustCompile(`{[\.\w\:]*}`) +func getSemanticElementsFromPatterns(patterns []string) ([]field, error) { var fs []field for _, lp := range patterns { - pp := r.FindAllString(lp, -1) + pp := semanticElementsRegex.FindAllString(lp, -1) for _, p := range pp { f := newField(p) if f.SemanticElements == nil { @@ 
-221,9 +218,7 @@ func accumulateRemoveFields(remove interface{}, out []string) []string { case string: return append(out, vs) case []string: - for _, vv := range vs { - out = append(out, vv) - } + out = append(out, vs...) case []interface{}: for _, vv := range vs { vvs := vv.(string) From e9d8572381ba1fbe50281fe87fac85772d0eb51a Mon Sep 17 00:00:00 2001 From: Norrie Taylor <91171431+norrietaylor@users.noreply.github.com> Date: Thu, 25 Jan 2024 11:16:01 -0800 Subject: [PATCH 082/129] Update codeowners as per 2024 Security Integrations Team reorganization (#37696) Co-authored-by: Pierre HILBERT --- .github/CODEOWNERS | 160 ++++++++++++++++++++--------------------- .github/dependabot.yml | 2 +- 2 files changed, 81 insertions(+), 81 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 140ccf9d73f7..a7237f0b031f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -20,7 +20,7 @@ CHANGELOG* /.ci/ @elastic/elastic-agent-data-plane /.github/ @elastic/elastic-agent-data-plane -/auditbeat/ @elastic/security-external-integrations +/auditbeat/ @elastic/sec-linux-platform /deploy/ @elastic/elastic-agent-data-plane /deploy/kubernetes @elastic/elastic-agent-data-plane @elastic/obs-cloudnative-monitoring /dev-tools/ @elastic/elastic-agent-data-plane @@ -28,10 +28,10 @@ CHANGELOG* /docs/ @elastic/elastic-agent-data-plane /filebeat @elastic/elastic-agent-data-plane /filebeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module. 
-/filebeat/input/syslog/ @elastic/security-external-integrations -/filebeat/input/winlog/ @elastic/security-external-integrations +/filebeat/input/syslog/ @elastic/sec-deployment-and-devices +/filebeat/input/winlog/ @elastic/sec-windows-platform /filebeat/module/apache @elastic/obs-infraobs-integrations -/filebeat/module/auditd @elastic/security-external-integrations +/filebeat/module/auditd @elastic/sec-linux-platform /filebeat/module/elasticsearch/ @elastic/infra-monitoring-ui /filebeat/module/haproxy @elastic/obs-infraobs-integrations /filebeat/module/icinga # TODO: find right team @@ -40,14 +40,14 @@ CHANGELOG* /filebeat/module/kibana/ @elastic/infra-monitoring-ui /filebeat/module/logstash/ @elastic/infra-monitoring-ui /filebeat/module/mongodb @elastic/obs-infraobs-integrations -/filebeat/module/mysql @elastic/security-external-integrations +/filebeat/module/mysql @elastic/obs-infraobs-integrations /filebeat/module/nats @elastic/obs-infraobs-integrations /filebeat/module/nginx @elastic/obs-infraobs-integrations -/filebeat/module/osquery @elastic/security-external-integrations -/filebeat/module/pensando @elastic/security-external-integrations +/filebeat/module/osquery @elastic/sec-deployment-and-devices +/filebeat/module/pensando @elastic/sec-deployment-and-devices /filebeat/module/postgresql @elastic/obs-infraobs-integrations /filebeat/module/redis @elastic/obs-infraobs-integrations -/filebeat/module/santa @elastic/security-external-integrations +/filebeat/module/santa @elastic/security-service-integrations /filebeat/module/system @elastic/elastic-agent-data-plane /filebeat/module/traefik # TODO: find right team /heartbeat/ @elastic/obs-ds-hosted-services @@ -55,13 +55,13 @@ CHANGELOG* /libbeat/ @elastic/elastic-agent-data-plane /libbeat/docs/processors-list.asciidoc @elastic/ingest-docs /libbeat/management @elastic/elastic-agent-control-plane -/libbeat/processors/cache/ @elastic/security-external-integrations -/libbeat/processors/community_id/ 
@elastic/security-external-integrations -/libbeat/processors/decode_xml/ @elastic/security-external-integrations -/libbeat/processors/decode_xml_wineventlog/ @elastic/security-external-integrations -/libbeat/processors/dns/ @elastic/security-external-integrations -/libbeat/processors/registered_domain/ @elastic/security-external-integrations -/libbeat/processors/translate_sid/ @elastic/security-external-integrations +/libbeat/processors/cache/ @elastic/security-service-integrations +/libbeat/processors/community_id/ @elastic/sec-deployment-and-devices +/libbeat/processors/decode_xml/ @elastic/security-service-integrations +/libbeat/processors/decode_xml_wineventlog/ @elastic/sec-windows-platform +/libbeat/processors/dns/ @elastic/sec-deployment-and-devices +/libbeat/processors/registered_domain/ @elastic/sec-deployment-and-devices +/libbeat/processors/translate_sid/ @elastic/sec-windows-platform /libbeat/processors/add_cloud_metadata @elastic/obs-cloud-monitoring /libbeat/processors/add_kubernetes_metadata @elastic/obs-cloudnative-monitoring /licenses/ @elastic/elastic-agent-data-plane @@ -96,82 +96,82 @@ CHANGELOG* /metricbeat/module/system/ @elastic/elastic-agent-data-plane /metricbeat/module/vsphere @elastic/obs-infraobs-integrations /metricbeat/module/zookeeper @elastic/obs-infraobs-integrations -/packetbeat/ @elastic/security-external-integrations +/packetbeat/ @elastic/sec-linux-platform /script/ @elastic/elastic-agent-data-plane /testing/ @elastic/elastic-agent-data-plane /tools/ @elastic/elastic-agent-data-plane -/winlogbeat/ @elastic/security-external-integrations -/x-pack/auditbeat/ @elastic/security-external-integrations +/winlogbeat/ @elastic/sec-windows-platform +/x-pack/auditbeat/ @elastic/sec-linux-platform /x-pack/elastic-agent/ @elastic/elastic-agent-control-plane /x-pack/filebeat @elastic/elastic-agent-data-plane /x-pack/filebeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module. 
/x-pack/filebeat/input/awscloudwatch/ @elastic/obs-cloud-monitoring /x-pack/filebeat/input/awss3/ @elastic/obs-cloud-monitoring -/x-pack/filebeat/input/azureblobstorage/ @elastic/security-external-integrations +/x-pack/filebeat/input/azureblobstorage/ @elastic/security-service-integrations /x-pack/filebeat/input/azureeventhub/ @elastic/obs-cloud-monitoring -/x-pack/filebeat/input/cel/ @elastic/security-external-integrations +/x-pack/filebeat/input/cel/ @elastic/security-service-integrations /x-pack/filebeat/input/cometd/ @elastic/obs-infraobs-integrations -/x-pack/filebeat/input/entityanalytics/ @elastic/security-external-integrations -/x-pack/filebeat/input/gcppubsub/ @elastic/security-external-integrations -/x-pack/filebeat/input/gcs/ @elastic/security-external-integrations -/x-pack/filebeat/input/http_endpoint/ @elastic/security-external-integrations -/x-pack/filebeat/input/httpjson/ @elastic/security-external-integrations -/x-pack/filebeat/input/internal/httplog @elastic/security-external-integrations -/x-pack/filebeat/input/internal/httpmon @elastic/security-external-integrations -/x-pack/filebeat/input/lumberjack/ @elastic/security-external-integrations -/x-pack/filebeat/input/netflow/ @elastic/security-external-integrations -/x-pack/filebeat/input/o365audit/ @elastic/security-external-integrations +/x-pack/filebeat/input/entityanalytics/ @elastic/security-service-integrations +/x-pack/filebeat/input/gcppubsub/ @elastic/security-service-integrations +/x-pack/filebeat/input/gcs/ @elastic/security-service-integrations +/x-pack/filebeat/input/http_endpoint/ @elastic/security-service-integrations +/x-pack/filebeat/input/httpjson/ @elastic/security-service-integrations +/x-pack/filebeat/input/internal/httplog @elastic/security-service-integrations +/x-pack/filebeat/input/internal/httpmon @elastic/security-service-integrations +/x-pack/filebeat/input/lumberjack/ @elastic/security-service-integrations +/x-pack/filebeat/input/netflow/ 
@elastic/sec-deployment-and-devices +/x-pack/filebeat/input/o365audit/ @elastic/security-service-integrations /x-pack/filebeat/module/activemq @elastic/obs-infraobs-integrations /x-pack/filebeat/module/aws @elastic/obs-cloud-monitoring /x-pack/filebeat/module/awsfargate @elastic/obs-cloud-monitoring /x-pack/filebeat/module/azure @elastic/obs-cloud-monitoring -/x-pack/filebeat/module/barracuda @elastic/security-external-integrations -/x-pack/filebeat/module/bluecoat @elastic/security-external-integrations -/x-pack/filebeat/module/cef @elastic/security-external-integrations -/x-pack/filebeat/module/checkpoint @elastic/security-external-integrations -/x-pack/filebeat/module/cisco @elastic/security-external-integrations -/x-pack/filebeat/module/coredns @elastic/security-external-integrations -/x-pack/filebeat/module/crowdstrike @elastic/security-external-integrations -/x-pack/filebeat/module/cyberarkpas @elastic/security-external-integrations -/x-pack/filebeat/module/cylance @elastic/security-external-integrations -/x-pack/filebeat/module/envoyproxy @elastic/security-external-integrations -/x-pack/filebeat/module/f5 @elastic/security-external-integrations -/x-pack/filebeat/module/fortinet @elastic/security-external-integrations -/x-pack/filebeat/module/gcp @elastic/security-external-integrations -/x-pack/filebeat/module/google_workspace @elastic/security-external-integrations +/x-pack/filebeat/module/barracuda @elastic/security-service-integrations +/x-pack/filebeat/module/bluecoat @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/cef @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/checkpoint @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/cisco @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/coredns @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/crowdstrike @elastic/security-service-integrations +/x-pack/filebeat/module/cyberarkpas @elastic/security-service-integrations +/x-pack/filebeat/module/cylance 
@elastic/security-service-integrations +/x-pack/filebeat/module/envoyproxy @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/f5 @elastic/security-service-integrations +/x-pack/filebeat/module/fortinet @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/gcp @elastic/security-service-integrations +/x-pack/filebeat/module/google_workspace @elastic/security-service-integrations /x-pack/filebeat/module/ibmmq @elastic/obs-infraobs-integrations -/x-pack/filebeat/module/imperva @elastic/security-external-integrations -/x-pack/filebeat/module/infoblox @elastic/security-external-integrations -/x-pack/filebeat/module/iptables @elastic/security-external-integrations -/x-pack/filebeat/module/juniper @elastic/security-external-integrations -/x-pack/filebeat/module/microsoft @elastic/security-external-integrations -/x-pack/filebeat/module/misp @elastic/security-external-integrations +/x-pack/filebeat/module/imperva @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/infoblox @elastic/security-service-integrations +/x-pack/filebeat/module/iptables @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/juniper @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/microsoft @elastic/sec-windows-platform +/x-pack/filebeat/module/misp @elastic/security-service-integrations /x-pack/filebeat/module/mssql @elastic/obs-infraobs-integrations -/x-pack/filebeat/module/mysqlenterprise @elastic/security-external-integrations -/x-pack/filebeat/module/netflow @elastic/security-external-integrations -/x-pack/filebeat/module/netscout @elastic/security-external-integrations -/x-pack/filebeat/module/o365 @elastic/security-external-integrations -/x-pack/filebeat/module/okta @elastic/security-external-integrations -/x-pack/filebeat/module/oracle @elastic/security-external-integrations -/x-pack/filebeat/module/panw @elastic/security-external-integrations -/x-pack/filebeat/module/proofpoint @elastic/security-external-integrations 
+/x-pack/filebeat/module/mysqlenterprise @elastic/sec-windows-platform +/x-pack/filebeat/module/netflow @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/netscout @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/o365 @elastic/security-service-integrations +/x-pack/filebeat/module/okta @elastic/security-service-integrations +/x-pack/filebeat/module/oracle @elastic/obs-infraobs-integrations +/x-pack/filebeat/module/panw @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/proofpoint @elastic/security-service-integrations /x-pack/filebeat/module/rabbitmq @elastic/obs-infraobs-integrations -/x-pack/filebeat/module/radware @elastic/security-external-integrations +/x-pack/filebeat/module/radware @elastic/sec-deployment-and-devices /x-pack/filebeat/module/salesforce @elastic/obs-infraobs-integrations -/x-pack/filebeat/module/snort @elastic/security-external-integrations -/x-pack/filebeat/module/snyk @elastic/security-external-integrations -/x-pack/filebeat/module/sonicwall @elastic/security-external-integrations -/x-pack/filebeat/module/sophos @elastic/security-external-integrations -/x-pack/filebeat/module/squid @elastic/security-external-integrations -/x-pack/filebeat/module/suricata @elastic/security-external-integrations -/x-pack/filebeat/module/threatintel @elastic/security-external-integrations -/x-pack/filebeat/module/tomcat @elastic/security-external-integrations -/x-pack/filebeat/module/zeek @elastic/security-external-integrations +/x-pack/filebeat/module/snort @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/snyk @elastic/security-service-integrations +/x-pack/filebeat/module/sonicwall @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/sophos @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/squid @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/suricata @elastic/sec-deployment-and-devices +/x-pack/filebeat/module/threatintel @elastic/security-service-integrations 
+/x-pack/filebeat/module/tomcat @elastic/obs-infraobs-integrations +/x-pack/filebeat/module/zeek @elastic/sec-deployment-and-devices /x-pack/filebeat/module/zookeeper @elastic/obs-infraobs-integrations -/x-pack/filebeat/module/zoom @elastic/security-external-integrations -/x-pack/filebeat/module/zscaler @elastic/security-external-integrations -/x-pack/filebeat/modules.d/zoom.yml.disabled @elastic/security-external-integrations -/x-pack/filebeat/processors/decode_cef/ @elastic/security-external-integrations +/x-pack/filebeat/module/zoom @elastic/security-service-integrations +/x-pack/filebeat/module/zscaler @elastic/security-service-integrations +/x-pack/filebeat/modules.d/zoom.yml.disabled @elastic/security-service-integrations +/x-pack/filebeat/processors/decode_cef/ @elastic/sec-deployment-and-devices /x-pack/heartbeat/ @elastic/obs-ds-hosted-services /x-pack/metricbeat/ @elastic/elastic-agent-data-plane /x-pack/metricbeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module. 
@@ -186,7 +186,7 @@ CHANGELOG* /x-pack/metricbeat/module/containerd/ @elastic/obs-cloudnative-monitoring /x-pack/metricbeat/module/coredns @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/enterprisesearch @elastic/ent-search-application-backend -/x-pack/metricbeat/module/gcp @elastic/obs-ds-hosted-services @elastic/obs-infraobs-integrations @elastic/security-external-integrations +/x-pack/metricbeat/module/gcp @elastic/obs-ds-hosted-services @elastic/obs-infraobs-integrations @elastic/security-service-integrations /x-pack/metricbeat/module/gcp/billing @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/gcp/cloudrun_metrics @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/gcp/cloudsql_mysql @elastic/obs-infraobs-integrations @@ -195,16 +195,16 @@ CHANGELOG* /x-pack/metricbeat/module/gcp/carbon @elastic/obs-ds-hosted-services /x-pack/metricbeat/module/gcp/compute @elastic/obs-ds-hosted-services /x-pack/metricbeat/module/gcp/dataproc @elastic/obs-infraobs-integrations -/x-pack/metricbeat/module/gcp/dns @elastic/security-external-integrations +/x-pack/metricbeat/module/gcp/dns @elastic/security-service-integrations /x-pack/metricbeat/module/gcp/firestore @elastic/obs-infraobs-integrations -/x-pack/metricbeat/module/gcp/firewall @elastic/security-external-integrations +/x-pack/metricbeat/module/gcp/firewall @elastic/security-service-integrations /x-pack/metricbeat/module/gcp/gke @elastic/obs-ds-hosted-services /x-pack/metricbeat/module/gcp/loadbalancing_logs @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/gcp/loadbalancing_metrics @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/gcp/pubsub @elastic/obs-ds-hosted-services /x-pack/metricbeat/module/gcp/redis @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/gcp/storage @elastic/obs-ds-hosted-services -/x-pack/metricbeat/module/gcp/vpcflow @elastic/security-external-integrations +/x-pack/metricbeat/module/gcp/vpcflow @elastic/security-service-integrations 
/x-pack/metricbeat/module/ibmmq @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/iis @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/istio/ @elastic/obs-cloudnative-monitoring @@ -216,7 +216,7 @@ CHANGELOG* /x-pack/metricbeat/module/statsd @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/stan/ @elastic/obs-cloudnative-monitoring /x-pack/metricbeat/module/tomcat @elastic/obs-infraobs-integrations -/x-pack/osquerybeat/ @elastic/security-external-integrations -/x-pack/packetbeat/ @elastic/security-external-integrations -/x-pack/winlogbeat/ @elastic/security-external-integrations -/x-pack/libbeat/reader/parquet/ @elastic/security-external-integrations +/x-pack/osquerybeat/ @elastic/sec-deployment-and-devices +/x-pack/packetbeat/ @elastic/sec-linux-platform +/x-pack/winlogbeat/ @elastic/sec-windows-platform +/x-pack/libbeat/reader/parquet/ @elastic/security-service-integrations diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 994a24bfb490..304f3add387e 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -29,7 +29,7 @@ updates: - dependency-name: go.elastic.co/go-licence-detector # Team:Service-Integrations - dependency-name: github.com/elastic/bayeux - # Team:Security-External Integrations + # Team:Security-Linux Platform - dependency-name: github.com/elastic/go-libaudit/* - dependency-name: github.com/elastic/go-perf - dependency-name: github.com/elastic/go-seccomp-bpf From 50bfbbf039f0d50e8f359d56be90ff7ffc1ca3f4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 Jan 2024 22:07:45 +0000 Subject: [PATCH 083/129] build(deps): bump github.com/elastic/elastic-agent-libs from 0.7.4 to 0.7.5 (#37755) * build(deps): bump github.com/elastic/elastic-agent-libs Bumps [github.com/elastic/elastic-agent-libs](https://github.com/elastic/elastic-agent-libs) from 0.7.4 to 0.7.5. 
- [Release notes](https://github.com/elastic/elastic-agent-libs/releases) - [Commits](https://github.com/elastic/elastic-agent-libs/compare/v0.7.4...v0.7.5) --- updated-dependencies: - dependency-name: github.com/elastic/elastic-agent-libs dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update NOTICE.txt * Add changelog. --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Craig MacKenzie --- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 300599547614..6b7c6f0a4d23 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -55,6 +55,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix panic when MaxRetryInterval is specified, but RetryInterval is not {pull}35820[35820] - Support build of projects outside of beats directory {pull}36126[36126] - Support Elastic Agent control protocol chunking support {pull}37343[37343] +- Upgrade elastic-agent-libs to v0.7.5. Removes obsolete "Treating the CommonName field on X.509 certificates as a host name..." deprecation warning for 8.0. 
{pull}37755[37755] *Auditbeat* diff --git a/NOTICE.txt b/NOTICE.txt index 6770b3e0bffb..8ae0f7044886 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -12701,11 +12701,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.7.4 +Version: v0.7.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.7.4/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.7.5/LICENSE: Apache License Version 2.0, January 2004 diff --git a/go.mod b/go.mod index 462f575b24fe..c5b87bbeaa95 100644 --- a/go.mod +++ b/go.mod @@ -201,7 +201,7 @@ require ( github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5 github.com/elastic/bayeux v1.0.5 github.com/elastic/elastic-agent-autodiscover v0.6.7 - github.com/elastic/elastic-agent-libs v0.7.4 + github.com/elastic/elastic-agent-libs v0.7.5 github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 github.com/elastic/elastic-agent-system-metrics v0.9.1 github.com/elastic/go-elasticsearch/v8 v8.11.1 diff --git a/go.sum b/go.sum index fca63002617e..f496c268c916 100644 --- a/go.sum +++ b/go.sum @@ -662,8 +662,8 @@ github.com/elastic/elastic-agent-autodiscover v0.6.7 h1:+KVjltN0rPsBrU8b156gV4lO github.com/elastic/elastic-agent-autodiscover v0.6.7/go.mod h1:hFeFqneS2r4jD0/QzGkrNk0YVdN0JGh7lCWdsH7zcI4= github.com/elastic/elastic-agent-client/v7 v7.6.0 h1:FEn6FjzynW4TIQo5G096Tr7xYK/P5LY9cSS6wRbXZTc= github.com/elastic/elastic-agent-client/v7 v7.6.0/go.mod h1:GlUKrbVd/O1CRAZonpBeN3J0RlVqP6VGcrBjFWca+aM= -github.com/elastic/elastic-agent-libs v0.7.4 h1:/cmwOLwNAyJDNeR6sFIbHCDHDLPX2zAb/MAxQq7BRpo= -github.com/elastic/elastic-agent-libs v0.7.4/go.mod h1:pGMj5myawdqu+xE+WKvM5FQzKQ/MonikkWOzoFTJxaU= 
+github.com/elastic/elastic-agent-libs v0.7.5 h1:4UMqB3BREvhwecYTs/L23oQp1hs/XUkcunPlmTZn5yg= +github.com/elastic/elastic-agent-libs v0.7.5/go.mod h1:pGMj5myawdqu+xE+WKvM5FQzKQ/MonikkWOzoFTJxaU= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 h1:sb+25XJn/JcC9/VL8HX4r4QXSUq4uTNzGS2kxOE7u1U= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3/go.mod h1:rWarFM7qYxJKsi9WcV6ONcFjH/NA3niDNpTxO+8/GVI= github.com/elastic/elastic-agent-system-metrics v0.9.1 h1:r0ofKHgPpl+W09ie7tzGcCDC0d4NZbQUv37rSgHf4FM= From d4d2ddce3230f21f01a19afdb836626f7b0af4b5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 Jan 2024 22:33:03 +0000 Subject: [PATCH 084/129] build(deps): bump github.com/elastic/go-elasticsearch/v8 from 8.11.1 to 8.12.0 (#37673) * build(deps): bump github.com/elastic/go-elasticsearch/v8 Bumps [github.com/elastic/go-elasticsearch/v8](https://github.com/elastic/go-elasticsearch) from 8.11.1 to 8.12.0. - [Release notes](https://github.com/elastic/go-elasticsearch/releases) - [Changelog](https://github.com/elastic/go-elasticsearch/blob/main/CHANGELOG.md) - [Commits](https://github.com/elastic/go-elasticsearch/compare/v8.11.1...v8.12.0) --- updated-dependencies: - dependency-name: github.com/elastic/go-elasticsearch/v8 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update NOTICE.txt * Update changelog. 
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Craig MacKenzie --- CHANGELOG.next.asciidoc | 2 +- NOTICE.txt | 235 ++++++++++++++++++++++++++++++++++++++-- go.mod | 12 +- go.sum | 26 +++-- 4 files changed, 244 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 6b7c6f0a4d23..b8c46fa18c59 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -181,7 +181,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d *Heartbeat* - Added status to monitor run log report. - +- Upgrade github.com/elastic/go-elasticsearch/v8 to v8.12.0. {pull}37673[37673] *Metricbeat* diff --git a/NOTICE.txt b/NOTICE.txt index 8ae0f7044886..c803ff33e8ea 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -13437,11 +13437,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-concert@v0.2 -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-elasticsearch/v8 -Version: v8.11.1 +Version: v8.12.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v8@v8.11.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v8@v8.12.0/LICENSE: Apache License Version 2.0, January 2004 @@ -37382,11 +37382,11 @@ SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-transport-go/v8 -Version: v8.3.0 +Version: v8.4.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transport-go/v8@v8.3.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-transport-go/v8@v8.4.0/LICENSE: Apache License Version 2.0, January 2004 @@ -38235,11 +38235,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/go-logr/logr -Version: v1.2.4 +Version: v1.3.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/go-logr/logr@v1.2.4/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/go-logr/logr@v1.3.0/LICENSE: Apache License Version 2.0, January 2004 @@ -52426,11 +52426,11 @@ Contents of probable licence file $GOMODCACHE/go.opencensus.io@v0.24.0/LICENSE: -------------------------------------------------------------------------------- Dependency : go.opentelemetry.io/otel -Version: v1.19.0 +Version: v1.21.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel@v1.19.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel@v1.21.0/LICENSE: Apache License Version 2.0, January 2004 @@ -52637,11 +52637,222 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel@v1.19.0/L -------------------------------------------------------------------------------- Dependency : go.opentelemetry.io/otel/metric -Version: v1.19.0 +Version: v1.21.0 +Licence type (autodetected): Apache-2.0 
+-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/metric@v1.21.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/otel/sdk +Version: v1.21.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/metric@v1.19.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/sdk@v1.21.0/LICENSE: Apache License Version 2.0, January 2004 @@ -52848,11 +53059,11 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/metric@v1 -------------------------------------------------------------------------------- Dependency : go.opentelemetry.io/otel/trace -Version: v1.19.0 +Version: v1.21.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/trace@v1.19.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/otel/trace@v1.21.0/LICENSE: Apache License Version 2.0, January 2004 diff --git a/go.mod b/go.mod index c5b87bbeaa95..a7044889fac4 100644 --- a/go.mod +++ b/go.mod @@ -204,7 +204,7 @@ require ( github.com/elastic/elastic-agent-libs v0.7.5 github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 github.com/elastic/elastic-agent-system-metrics v0.9.1 - github.com/elastic/go-elasticsearch/v8 v8.11.1 + github.com/elastic/go-elasticsearch/v8 v8.12.0 github.com/elastic/mito v1.8.0 github.com/elastic/toutoumomoma v0.0.0-20221026030040-594ef30cb640 github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15 @@ -273,14 +273,14 @@ require ( github.com/docker/go-metrics v0.0.1 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect - github.com/elastic/elastic-transport-go/v8 v8.3.0 // indirect + 
github.com/elastic/elastic-transport-go/v8 v8.4.0 // indirect github.com/elastic/go-windows v1.0.1 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/fearful-symmetry/gomsr v0.0.1 // indirect github.com/felixge/httpsnoop v1.0.1 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-stack/stack v1.8.0 // indirect @@ -365,9 +365,9 @@ require ( github.com/zeebo/xxh3 v1.0.2 // indirect go.elastic.co/fastjson v1.1.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.19.0 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect - go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/otel v1.21.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/otel/trace v1.21.0 // indirect golang.org/x/exp v0.0.0-20231127185646-65229373498e // indirect golang.org/x/term v0.15.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect diff --git a/go.sum b/go.sum index f496c268c916..79feea755704 100644 --- a/go.sum +++ b/go.sum @@ -668,15 +668,15 @@ github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b6 github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3/go.mod h1:rWarFM7qYxJKsi9WcV6ONcFjH/NA3niDNpTxO+8/GVI= github.com/elastic/elastic-agent-system-metrics v0.9.1 h1:r0ofKHgPpl+W09ie7tzGcCDC0d4NZbQUv37rSgHf4FM= github.com/elastic/elastic-agent-system-metrics v0.9.1/go.mod h1:9C1UEfj0P687HAzZepHszN6zXA+2tN2Lx3Osvq1zby8= -github.com/elastic/elastic-transport-go/v8 v8.3.0 h1:DJGxovyQLXGr62e9nDMPSxRyWION0Bh6d9eCFBriiHo= -github.com/elastic/elastic-transport-go/v8 v8.3.0/go.mod h1:87Tcz8IVNe6rVSLdBux1o/PEItLtyabHU3naC7IoqKI= 
+github.com/elastic/elastic-transport-go/v8 v8.4.0 h1:EKYiH8CHd33BmMna2Bos1rDNMM89+hdgcymI+KzJCGE= +github.com/elastic/elastic-transport-go/v8 v8.4.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 h1:cWPqxlPtir4RoQVCpGSRXmLqjEHpJKbR60rxh1nQZY4= github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270/go.mod h1:Msl1pdboCbArMF/nSCDUXgQuWTeoMmE/z8607X+k7ng= github.com/elastic/glog v1.0.1-0.20210831205241-7d8b5c89dfc4/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/elastic/go-concert v0.2.0 h1:GAQrhRVXprnNjtvTP9pWJ1d4ToEA4cU5ci7TwTa20xg= github.com/elastic/go-concert v0.2.0/go.mod h1:HWjpO3IAEJUxOeaJOWXWEp7imKd27foxz9V5vegC/38= -github.com/elastic/go-elasticsearch/v8 v8.11.1 h1:1VgTgUTbpqQZ4uE+cPjkOvy/8aw1ZvKcU0ZUE5Cn1mc= -github.com/elastic/go-elasticsearch/v8 v8.11.1/go.mod h1:GU1BJHO7WeamP7UhuElYwzzHtvf9SDmeVpSSy9+o6Qg= +github.com/elastic/go-elasticsearch/v8 v8.12.0 h1:krkiCf4peJa7bZwGegy01b5xWWaYpik78wvisTeRO1U= +github.com/elastic/go-elasticsearch/v8 v8.12.0/go.mod h1:wSzJYrrKPZQ8qPuqAqc6KMR4HrBfHnZORvyL+FMFqq0= github.com/elastic/go-libaudit/v2 v2.5.0 h1:5OK919QRnGtcjVBz3n/cs5F42im1mPlVTA9TyIn2K54= github.com/elastic/go-libaudit/v2 v2.5.0/go.mod h1:AjlnhinP+kKQuUJoXLVrqxBM8uyhQmkzoV6jjsCFP4Q= github.com/elastic/go-licenser v0.4.1 h1:1xDURsc8pL5zYT9R29425J3vkHdt4RT5TNEMeRN48x4= @@ -794,8 +794,8 @@ github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 
h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= @@ -1962,12 +1962,14 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod 
h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= From 0dc012db7247246a1439311686e7a7cb8515efdb Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 26 Jan 2024 12:33:16 -0500 Subject: [PATCH 085/129] chore: Update snapshot.yml (#37760) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 0901d9638f64..608fb8fd777a 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-l534sdis-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-l5snflwr-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-l534sdis-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-l5snflwr-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-l534sdis-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-l5snflwr-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From c5e79a25d05d5bdfa9da4d187fe89523faa42afc Mon Sep 17 
00:00:00 2001 From: Paul Bianciardi <70908889+paulb-elastic@users.noreply.github.com> Date: Mon, 29 Jan 2024 10:48:07 +0000 Subject: [PATCH 086/129] Update CODEOWNERS (#37765) Adds [stack-monitoring](https://github.com/orgs/elastic/teams/stack-monitoring) and removes `infra-monitoring-ui` (as being decommissioned), to review Stack Monitoring changes --- .github/CODEOWNERS | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index a7237f0b031f..d3e40d854f57 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -32,13 +32,13 @@ CHANGELOG* /filebeat/input/winlog/ @elastic/sec-windows-platform /filebeat/module/apache @elastic/obs-infraobs-integrations /filebeat/module/auditd @elastic/sec-linux-platform -/filebeat/module/elasticsearch/ @elastic/infra-monitoring-ui +/filebeat/module/elasticsearch/ @elastic/stack-monitoring /filebeat/module/haproxy @elastic/obs-infraobs-integrations /filebeat/module/icinga # TODO: find right team /filebeat/module/iis @elastic/obs-infraobs-integrations /filebeat/module/kafka @elastic/obs-infraobs-integrations -/filebeat/module/kibana/ @elastic/infra-monitoring-ui -/filebeat/module/logstash/ @elastic/infra-monitoring-ui +/filebeat/module/kibana/ @elastic/stack-monitoring +/filebeat/module/logstash/ @elastic/stack-monitoring /filebeat/module/mongodb @elastic/obs-infraobs-integrations /filebeat/module/mysql @elastic/obs-infraobs-integrations /filebeat/module/nats @elastic/obs-infraobs-integrations @@ -69,20 +69,20 @@ CHANGELOG* /metricbeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module. 
/metricbeat/helper/kubernetes @elastic/obs-cloudnative-monitoring /metricbeat/module/apache @elastic/obs-infraobs-integrations -/metricbeat/module/beat/ @elastic/infra-monitoring-ui +/metricbeat/module/beat/ @elastic/stack-monitoring /metricbeat/module/ceph @elastic/obs-infraobs-integrations /metricbeat/module/couchbase @elastic/obs-infraobs-integrations /metricbeat/module/couchdb @elastic/obs-infraobs-integrations -/metricbeat/module/elasticsearch/ @elastic/infra-monitoring-ui +/metricbeat/module/elasticsearch/ @elastic/stack-monitoring /metricbeat/module/etcd @elastic/obs-infraobs-integrations /metricbeat/module/golang @elastic/obs-infraobs-integrations /metricbeat/module/haproxy @elastic/obs-infraobs-integrations /metricbeat/module/http @elastic/obs-infraobs-integrations /metricbeat/module/jolokia @elastic/obs-infraobs-integrations /metricbeat/module/kafka @elastic/obs-infraobs-integrations -/metricbeat/module/kibana/ @elastic/infra-monitoring-ui +/metricbeat/module/kibana/ @elastic/stack-monitoring /metricbeat/module/kubernetes/ @elastic/obs-cloudnative-monitoring -/metricbeat/module/logstash/ @elastic/infra-monitoring-ui +/metricbeat/module/logstash/ @elastic/stack-monitoring /metricbeat/module/memcached @elastic/obs-infraobs-integrations /metricbeat/module/mongodb @elastic/obs-infraobs-integrations /metricbeat/module/mysql @elastic/obs-infraobs-integrations From 040d6e70d5a052247aa89fadc8717bd05ad34eeb Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 29 Jan 2024 12:32:16 -0500 Subject: [PATCH 087/129] chore: Update snapshot.yml (#37777) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 608fb8fd777a..c1e25d376f66 100644 --- 
a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-l5snflwr-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-yil7wib0-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-l5snflwr-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-yil7wib0-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-l5snflwr-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-yil7wib0-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 6d3239a599f7bbeeeb947658bc9a5e7a55622c69 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 29 Jan 2024 16:08:52 -0500 Subject: [PATCH 088/129] Add known issue for AWS S3 performance in 8.12 (#37766) (#37767) (cherry picked from commit 51f5cb4e4d23f122cc546c670cb3912c2c3f0184) Co-authored-by: Craig MacKenzie --- CHANGELOG.asciidoc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 3305b1989b69..071357c104a2 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -7,6 +7,18 @@ === Beats version 8.12.0 https://github.com/elastic/beats/compare/v8.11.4\...v8.12.0[View commits] +==== Known Issues + +*Affecting all Beats* + +Performance regression in AWS S3 inputs using SQS notification. + +In 8.12 the default memory queue flush interval was raised from 1 second to 10 seconds. 
In many configurations this improves performance because it allows the output to batch more events per round trip, which improves efficiency. However, the SQS input has an extra bottleneck that interacts badly with the new value. For more details see {issue}37754[37754]. + +If you are using the Elasticsearch output, and your output configuration uses a performance preset, switch it to `preset: latency`. If you use no preset or use `preset: custom`, then set `queue.mem.flush.timeout: 1` in your queue or output configuration. + +If you are not using the Elasticsearch output, set `queue.mem.flush.timeout: 1` in your queue or output configuration. + ==== Breaking changes *Heartbeat* From 5a9613e0b17241a6e69561a55a465ff6d9503ac8 Mon Sep 17 00:00:00 2001 From: Chris Berkhout Date: Tue, 30 Jan 2024 10:55:11 +0100 Subject: [PATCH 089/129] Update SQL and Oracle module docs regarding Oracle DSNs (#37590) - Add 'oracle://' URL format. - Add note about encoding of special characters in URLs. - Align the SQL module and the Oracle module documentation. --- CHANGELOG.next.asciidoc | 1 + metricbeat/docs/modules/oracle.asciidoc | 15 ++++++++++----- metricbeat/docs/modules/sql.asciidoc | 18 +++++++++++++----- .../module/oracle/_meta/docs.asciidoc | 15 ++++++++++----- .../metricbeat/module/sql/_meta/docs.asciidoc | 19 +++++++++++++------ 5 files changed, 47 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index b8c46fa18c59..3f06bacacb69 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -135,6 +135,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d *Filebeat* +- Update SQL input documentation regarding Oracle DSNs {pull}37590[37590] - add documentation for decode_xml_wineventlog processor field mappings. {pull}32456[32456] - httpjson input: Add request tracing logger. {issue}32402[32402] {pull}32412[32412] - Add cloudflare R2 to provider list in AWS S3 input. 
{pull}32620[32620] diff --git a/metricbeat/docs/modules/oracle.asciidoc b/metricbeat/docs/modules/oracle.asciidoc index f524967cce5d..3436caa9cc26 100644 --- a/metricbeat/docs/modules/oracle.asciidoc +++ b/metricbeat/docs/modules/oracle.asciidoc @@ -60,19 +60,24 @@ Then, Metricbeat can be launched. *Host Configuration* -The following two types of host configurations are supported: +The following types of host configuration are supported: -1. Old style host configuration for backwards compatibility: +1. An old-style Oracle connection string, for backwards compatibility: a. `hosts: ["user/pass@0.0.0.0:1521/ORCLPDB1.localdomain"]` b. `hosts: ["user/password@0.0.0.0:1521/ORCLPDB1.localdomain as sysdba"]` -2. DSN host configuration: +2. DSN configuration as a URL: + a. `hosts: ["oracle://user:pass@0.0.0.0:1521/ORCLPDB1.localdomain?sysdba=1"]` + +3. DSN configuration as a logfmt-encoded parameter list: a. `hosts: ['user="user" password="pass" connectString="0.0.0.0:1521/ORCLPDB1.localdomain"']` b. `hosts: ['user="user" password="password" connectString="host:port/service_name" sysdba=true']` -DSN host configuration is the recommended way to configure the Oracle Metricbeat Module as it supports the usage of special characters in the password. +DSN host configuration is the recommended configuration type as it supports the use of special characters in the password. + +In a URL any special characters should be URL encoded. -Note: If the password contains the backslash (`\`) character, it must be escaped with a backslash. For example, if the password is `my\_password`, it should be written as `my\\_password`. +In the logfmt-encoded DSN format, if the password contains a backslash character (`\`), it must be escaped with another backslash. For example, if the password is `my\_password`, it must be written as `my\\_password`. 
[float] == Metricsets diff --git a/metricbeat/docs/modules/sql.asciidoc b/metricbeat/docs/modules/sql.asciidoc index 9c27c0bc4ba5..d8e0e15b617d 100644 --- a/metricbeat/docs/modules/sql.asciidoc +++ b/metricbeat/docs/modules/sql.asciidoc @@ -871,19 +871,26 @@ Then, Metricbeat can be launched. ===== Host Configuration for Oracle -The following two types of host configurations are supported: +The following types of host configuration are supported: -1. DSN host configuration as URL: +1. An old-style Oracle connection string, for backwards compatibility: a. `hosts: ["user/pass@0.0.0.0:1521/ORCLPDB1.localdomain"]` b. `hosts: ["user/password@0.0.0.0:1521/ORCLPDB1.localdomain as sysdba"]` -2. DSN host configuration: +2. DSN configuration as a URL: + a. `hosts: ["oracle://user:pass@0.0.0.0:1521/ORCLPDB1.localdomain?sysdba=1"]` + +3. DSN configuration as a logfmt-encoded parameter list: a. `hosts: ['user="user" password="pass" connectString="0.0.0.0:1521/ORCLPDB1.localdomain"']` b. `hosts: ['user="user" password="password" connectString="host:port/service_name" sysdba=true']` -Note: If the password contains the backslash (`\`) character, it must be escaped with a backslash. For example, if the password is `my\_password`, it should be written as `my\\_password`. +DSN host configuration is the recommended configuration type as it supports the use of special characters in the password. + +In a URL any special characters should be URL encoded. -The username and password to connect to the database can be provided as values to `username` and `password` keys of `sql.yml`. +In the logfmt-encoded DSN format, if the password contains a backslash character (`\`), it must be escaped with another backslash. For example, if the password is `my\_password`, it must be written as `my\\_password`. + +The username and password to connect to the database can be provided as values to the `username` and `password` keys of `sql.yml`. 
[source,yml] ---- @@ -901,6 +908,7 @@ The username and password to connect to the database can be provided as values t response_format: variables ---- + :edit_url: [float] diff --git a/x-pack/metricbeat/module/oracle/_meta/docs.asciidoc b/x-pack/metricbeat/module/oracle/_meta/docs.asciidoc index 7a93e3069816..887b06019399 100644 --- a/x-pack/metricbeat/module/oracle/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/oracle/_meta/docs.asciidoc @@ -48,19 +48,24 @@ Then, Metricbeat can be launched. *Host Configuration* -The following two types of host configurations are supported: +The following types of host configuration are supported: -1. Old style host configuration for backwards compatibility: +1. An old-style Oracle connection string, for backwards compatibility: a. `hosts: ["user/pass@0.0.0.0:1521/ORCLPDB1.localdomain"]` b. `hosts: ["user/password@0.0.0.0:1521/ORCLPDB1.localdomain as sysdba"]` -2. DSN host configuration: +2. DSN configuration as a URL: + a. `hosts: ["oracle://user:pass@0.0.0.0:1521/ORCLPDB1.localdomain?sysdba=1"]` + +3. DSN configuration as a logfmt-encoded parameter list: a. `hosts: ['user="user" password="pass" connectString="0.0.0.0:1521/ORCLPDB1.localdomain"']` b. `hosts: ['user="user" password="password" connectString="host:port/service_name" sysdba=true']` -DSN host configuration is the recommended way to configure the Oracle Metricbeat Module as it supports the usage of special characters in the password. +DSN host configuration is the recommended configuration type as it supports the use of special characters in the password. + +In a URL any special characters should be URL encoded. -Note: If the password contains the backslash (`\`) character, it must be escaped with a backslash. For example, if the password is `my\_password`, it should be written as `my\\_password`. +In the logfmt-encoded DSN format, if the password contains a backslash character (`\`), it must be escaped with another backslash. 
For example, if the password is `my\_password`, it must be written as `my\\_password`. [float] == Metricsets diff --git a/x-pack/metricbeat/module/sql/_meta/docs.asciidoc b/x-pack/metricbeat/module/sql/_meta/docs.asciidoc index 17175cb58780..95ae9376e4d0 100644 --- a/x-pack/metricbeat/module/sql/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/sql/_meta/docs.asciidoc @@ -859,19 +859,26 @@ Then, Metricbeat can be launched. ===== Host Configuration for Oracle -The following two types of host configurations are supported: +The following types of host configuration are supported: -1. DSN host configuration as URL: +1. An old-style Oracle connection string, for backwards compatibility: a. `hosts: ["user/pass@0.0.0.0:1521/ORCLPDB1.localdomain"]` b. `hosts: ["user/password@0.0.0.0:1521/ORCLPDB1.localdomain as sysdba"]` -2. DSN host configuration: +2. DSN configuration as a URL: + a. `hosts: ["oracle://user:pass@0.0.0.0:1521/ORCLPDB1.localdomain?sysdba=1"]` + +3. DSN configuration as a logfmt-encoded parameter list: a. `hosts: ['user="user" password="pass" connectString="0.0.0.0:1521/ORCLPDB1.localdomain"']` b. `hosts: ['user="user" password="password" connectString="host:port/service_name" sysdba=true']` -Note: If the password contains the backslash (`\`) character, it must be escaped with a backslash. For example, if the password is `my\_password`, it should be written as `my\\_password`. +DSN host configuration is the recommended configuration type as it supports the use of special characters in the password. + +In a URL any special characters should be URL encoded. -The username and password to connect to the database can be provided as values to `username` and `password` keys of `sql.yml`. +In the logfmt-encoded DSN format, if the password contains a backslash character (`\`), it must be escaped with another backslash. For example, if the password is `my\_password`, it must be written as `my\\_password`. 
+ +The username and password to connect to the database can be provided as values to the `username` and `password` keys of `sql.yml`. [source,yml] ---- @@ -887,4 +894,4 @@ The username and password to connect to the database can be provided as values t sql_queries: - query: SELECT METRIC_NAME, VALUE FROM V$SYSMETRIC WHERE GROUP_ID = 2 and METRIC_NAME LIKE '%' response_format: variables ----- \ No newline at end of file +---- From 9f58fef62910c5aa007bcd0e86a431c339d21011 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Tue, 30 Jan 2024 14:21:05 +0100 Subject: [PATCH 090/129] github-action: use wildcards for discovering all the workflows (#37783) --- .github/workflows/opentelemetry.yml | 44 +++++------------------------ 1 file changed, 7 insertions(+), 37 deletions(-) diff --git a/.github/workflows/opentelemetry.yml b/.github/workflows/opentelemetry.yml index 4cdb1e2197eb..84a6209ff2c9 100644 --- a/.github/workflows/opentelemetry.yml +++ b/.github/workflows/opentelemetry.yml @@ -1,46 +1,16 @@ +--- +# Look up results at https://ela.st/oblt-ci-cd-stats. +# There will be one service per GitHub repository, including the org name, and one Transaction per Workflow. 
name: OpenTelemetry Export Trace on: workflow_run: - workflows: - - bump-elastic-stack-snapshot - - bump-golang - - check-auditbeat - - check-default - - check-dev-tools - - check-docs - - check-filebeat - - check-heartbeat - - check-libbeat - - check-metricbeat - - check-packetbeat - - check-winlogbeat - - check-x-pack-auditbeat - - check-x-pack-dockerlogbeat - - check-x-pack-filebeat - - check-x-pack-functionbeat - - check-x-pack-heartbeat - - check-x-pack-libbeat - - check-x-pack-metricbeat - - check-x-pack-osquerybeat - - check-x-pack-packetbeat - - check-x-pack-winlogbeat - - golangci-lint - - notify-stalled-snapshots - - auditbeat - - filebeat - - heartbeat - - metricbeat - - packetbeat - - x-pack-auditbeat - - x-pack-filebeat - - x-pack-functionbeat - - x-pack-heartbeat - - x-pack-metricbeat - - x-pack-osquerybeat - - x-pack-packetbeat + workflows: [ "*" ] types: [completed] +permissions: + contents: read + jobs: otel-export-trace: runs-on: ubuntu-latest From c331659458257c8697b6214a3ba5226752ed44e6 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 30 Jan 2024 13:02:10 -0500 Subject: [PATCH 091/129] Change queue flush to include unit in known issue. (#37791) (#37792) (cherry picked from commit 2b8622034bee70a298a22bfaf2c10e26698f7fc2) Co-authored-by: Craig MacKenzie --- CHANGELOG.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 071357c104a2..eef386a8a6ed 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -15,9 +15,9 @@ Performance regression in AWS S3 inputs using SQS notification. In 8.12 the default memory queue flush interval was raised from 1 second to 10 seconds. In many configurations this improves performance because it allows the output to batch more events per round trip, which improves efficiency. However, the SQS input has an extra bottleneck that interacts badly with the new value. 
For more details see {issue}37754[37754]. -If you are using the Elasticsearch output, and your output configuration uses a performance preset, switch it to `preset: latency`. If you use no preset or use `preset: custom`, then set `queue.mem.flush.timeout: 1` in your queue or output configuration. +If you are using the Elasticsearch output, and your output configuration uses a performance preset, switch it to `preset: latency`. If you use no preset or use `preset: custom`, then set `queue.mem.flush.timeout: 1s` in your queue or output configuration. -If you are not using the Elasticsearch output, set `queue.mem.flush.timeout: 1` in your queue or output configuration. +If you are not using the Elasticsearch output, set `queue.mem.flush.timeout: 1s` in your queue or output configuration. ==== Breaking changes From e900baf2fb45e507aecfcecc7fe23edcb355d42e Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Wed, 31 Jan 2024 10:47:52 +1030 Subject: [PATCH 092/129] {,x-pack}/packetbeat: add support for pipeline module uploads (#37291) * packetbeat/module: new package for supporting modules [git-generate] yoink -pkg ./winlogbeat/module -dir packetbeat -y UploadPipelines,PipelinesFS gsed -r -i 's/Winlogbeat/Packetbeat/g' packetbeat/module/*.go gsed -r -i 's/winlogbeat/packetbeat/g' packetbeat/module/*.go goimports -w -local github.com/elastic ./packetbeat/module yoink -pkg ./x-pack/winlogbeat/module -dir x-pack/packetbeat -y init gsed -r -i 's/winlogbeat/packetbeat/g' x-pack/packetbeat/module/*.go mkdir -p x-pack/packetbeat/module/null/ingest touch x-pack/packetbeat/module/null/ingest/pin.yml * packetbeat/module: adjust pipeline names * packetbeat/beater: add support for pipeline uploads * x-pack/packetbeat/module: import pipelines from integrations and remove placeholder [git-generate] data_stream_root=$(go env GOPATH)/src/github.com/elastic/integrations/packages/network_traffic/data_stream parallel mkdir -p 
x-pack/packetbeat/module/{/}/ingest ::: $data_stream_root/* parallel 'cp {}/elasticsearch/ingest_pipeline/* x-pack/packetbeat/module/{/}/ingest/' ::: $data_stream_root/* rm -rf x-pack/packetbeat/module/null * x-pack/packetbeat/module: add routing --- CHANGELOG.next.asciidoc | 2 + .../_meta/config/beat.reference.yml.tmpl | 5 + packetbeat/beater/packetbeat.go | 57 +++++- packetbeat/config/config.go | 17 +- packetbeat/magefile.go | 9 +- packetbeat/module/pipeline.go | 188 ++++++++++++++++++ packetbeat/packetbeat.reference.yml | 5 + packetbeat/scripts/mage/config.go | 7 + .../config/output-elasticsearch.yml.tmpl | 15 ++ x-pack/packetbeat/cmd/root.go | 3 + x-pack/packetbeat/magefile.go | 1 + .../packetbeat/module/amqp/ingest/default.yml | 59 ++++++ .../packetbeat/module/amqp/ingest/geoip.yml | 103 ++++++++++ .../module/cassandra/ingest/default.yml | 59 ++++++ .../module/cassandra/ingest/geoip.yml | 103 ++++++++++ .../module/dhcpv4/ingest/default.yml | 74 +++++++ .../packetbeat/module/dhcpv4/ingest/geoip.yml | 103 ++++++++++ .../packetbeat/module/dns/ingest/default.yml | 59 ++++++ x-pack/packetbeat/module/dns/ingest/geoip.yml | 103 ++++++++++ .../packetbeat/module/flow/ingest/default.yml | 89 +++++++++ .../packetbeat/module/flow/ingest/geoip.yml | 103 ++++++++++ .../packetbeat/module/http/ingest/default.yml | 72 +++++++ .../packetbeat/module/http/ingest/geoip.yml | 103 ++++++++++ .../packetbeat/module/icmp/ingest/default.yml | 66 ++++++ .../packetbeat/module/icmp/ingest/geoip.yml | 103 ++++++++++ .../module/memcached/ingest/default.yml | 79 ++++++++ .../module/memcached/ingest/geoip.yml | 103 ++++++++++ .../module/mongodb/ingest/default.yml | 59 ++++++ .../module/mongodb/ingest/geoip.yml | 103 ++++++++++ .../module/mysql/ingest/default.yml | 59 ++++++ .../packetbeat/module/mysql/ingest/geoip.yml | 103 ++++++++++ .../packetbeat/module/nfs/ingest/default.yml | 59 ++++++ x-pack/packetbeat/module/nfs/ingest/geoip.yml | 103 ++++++++++ .../module/pgsql/ingest/default.yml | 59 
++++++ .../packetbeat/module/pgsql/ingest/geoip.yml | 103 ++++++++++ x-pack/packetbeat/module/pipeline.go | 20 ++ .../module/redis/ingest/default.yml | 59 ++++++ .../packetbeat/module/redis/ingest/geoip.yml | 103 ++++++++++ .../module/routing/ingest/default.yml | 64 ++++++ .../packetbeat/module/sip/ingest/default.yml | 59 ++++++ x-pack/packetbeat/module/sip/ingest/geoip.yml | 103 ++++++++++ .../module/thrift/ingest/default.yml | 59 ++++++ .../packetbeat/module/thrift/ingest/geoip.yml | 103 ++++++++++ .../packetbeat/module/tls/ingest/default.yml | 99 +++++++++ x-pack/packetbeat/module/tls/ingest/geoip.yml | 103 ++++++++++ x-pack/packetbeat/packetbeat.reference.yml | 5 + x-pack/packetbeat/packetbeat.yml | 7 +- 47 files changed, 3098 insertions(+), 24 deletions(-) create mode 100644 packetbeat/module/pipeline.go create mode 100644 x-pack/packetbeat/_meta/config/output-elasticsearch.yml.tmpl create mode 100644 x-pack/packetbeat/module/amqp/ingest/default.yml create mode 100644 x-pack/packetbeat/module/amqp/ingest/geoip.yml create mode 100644 x-pack/packetbeat/module/cassandra/ingest/default.yml create mode 100644 x-pack/packetbeat/module/cassandra/ingest/geoip.yml create mode 100644 x-pack/packetbeat/module/dhcpv4/ingest/default.yml create mode 100644 x-pack/packetbeat/module/dhcpv4/ingest/geoip.yml create mode 100644 x-pack/packetbeat/module/dns/ingest/default.yml create mode 100644 x-pack/packetbeat/module/dns/ingest/geoip.yml create mode 100644 x-pack/packetbeat/module/flow/ingest/default.yml create mode 100644 x-pack/packetbeat/module/flow/ingest/geoip.yml create mode 100644 x-pack/packetbeat/module/http/ingest/default.yml create mode 100644 x-pack/packetbeat/module/http/ingest/geoip.yml create mode 100644 x-pack/packetbeat/module/icmp/ingest/default.yml create mode 100644 x-pack/packetbeat/module/icmp/ingest/geoip.yml create mode 100644 x-pack/packetbeat/module/memcached/ingest/default.yml create mode 100644 x-pack/packetbeat/module/memcached/ingest/geoip.yml 
create mode 100644 x-pack/packetbeat/module/mongodb/ingest/default.yml create mode 100644 x-pack/packetbeat/module/mongodb/ingest/geoip.yml create mode 100644 x-pack/packetbeat/module/mysql/ingest/default.yml create mode 100644 x-pack/packetbeat/module/mysql/ingest/geoip.yml create mode 100644 x-pack/packetbeat/module/nfs/ingest/default.yml create mode 100644 x-pack/packetbeat/module/nfs/ingest/geoip.yml create mode 100644 x-pack/packetbeat/module/pgsql/ingest/default.yml create mode 100644 x-pack/packetbeat/module/pgsql/ingest/geoip.yml create mode 100644 x-pack/packetbeat/module/pipeline.go create mode 100644 x-pack/packetbeat/module/redis/ingest/default.yml create mode 100644 x-pack/packetbeat/module/redis/ingest/geoip.yml create mode 100644 x-pack/packetbeat/module/routing/ingest/default.yml create mode 100644 x-pack/packetbeat/module/sip/ingest/default.yml create mode 100644 x-pack/packetbeat/module/sip/ingest/geoip.yml create mode 100644 x-pack/packetbeat/module/thrift/ingest/default.yml create mode 100644 x-pack/packetbeat/module/thrift/ingest/geoip.yml create mode 100644 x-pack/packetbeat/module/tls/ingest/default.yml create mode 100644 x-pack/packetbeat/module/tls/ingest/geoip.yml diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 3f06bacacb69..5ba27260c3b7 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -205,6 +205,8 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d *Packetbeat* - Bump Windows Npcap version to v1.79. {pull}37733[37733] +- Add metrics for TCP flags. {issue}36992[36992] {pull}36975[36975] +- Add support for pipeline loading. 
{pull}37291[37291] *Packetbeat* diff --git a/packetbeat/_meta/config/beat.reference.yml.tmpl b/packetbeat/_meta/config/beat.reference.yml.tmpl index 649ec0e8deea..033aa1e51063 100644 --- a/packetbeat/_meta/config/beat.reference.yml.tmpl +++ b/packetbeat/_meta/config/beat.reference.yml.tmpl @@ -78,6 +78,11 @@ packetbeat.interfaces.internal_networks: # can stay enabled even after beat is shut down. #packetbeat.interfaces.auto_promisc_mode: true +# By default Ingest pipelines are not updated if a pipeline with the same ID +# already exists. If this option is enabled Packetbeat overwrites pipelines +# every time a new Elasticsearch connection is established. +#packetbeat.overwrite_pipelines: false + {{- template "windows_npcap.yml.tmpl" .}} {{header "Flows"}} diff --git a/packetbeat/beater/packetbeat.go b/packetbeat/beater/packetbeat.go index 725f3eebc33d..d8c223f17892 100644 --- a/packetbeat/beater/packetbeat.go +++ b/packetbeat/beater/packetbeat.go @@ -25,13 +25,16 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common/reload" + "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" "github.com/elastic/beats/v7/libbeat/management" "github.com/elastic/beats/v7/libbeat/monitoring/inputmon" + "github.com/elastic/beats/v7/libbeat/outputs/elasticsearch" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/service" "github.com/elastic/beats/v7/packetbeat/config" + "github.com/elastic/beats/v7/packetbeat/module" "github.com/elastic/beats/v7/packetbeat/protos" // Add packetbeat default processors @@ -80,10 +83,11 @@ func initialConfig() config.Config { // Beater object. 
Contains all objects needed to run the beat type packetbeat struct { - config *conf.C - factory *processorFactory - done chan struct{} - stopOnce sync.Once + config *conf.C + factory *processorFactory + overwritePipelines bool + done chan struct{} + stopOnce sync.Once } // New returns a new Packetbeat beat.Beater. @@ -98,15 +102,35 @@ func New(b *beat.Beat, rawConfig *conf.C) (beat.Beater, error) { return nil, err } + var overwritePipelines bool + if !b.Manager.Enabled() { + // Pipeline overwrite is only enabled on standalone packetbeat + // since pipelines are managed by fleet otherwise. + config, err := configurator(rawConfig) + if err != nil { + return nil, err + } + overwritePipelines = config.OverwritePipelines + b.OverwritePipelinesCallback = func(esConfig *conf.C) error { + esClient, err := eslegclient.NewConnectedClient(esConfig, "Packetbeat") + if err != nil { + return err + } + _, err = module.UploadPipelines(b.Info, esClient, overwritePipelines) + return err + } + } + return &packetbeat{ - config: rawConfig, - factory: factory, - done: make(chan struct{}), + config: rawConfig, + factory: factory, + overwritePipelines: overwritePipelines, + done: make(chan struct{}), }, nil } // Run starts the packetbeat network capture, decoding and event publication, sending -// events to b.Publisher. If b is mananaged, packetbeat is registered with the +// events to b.Publisher. If b is managed, packetbeat is registered with the // reload.Registry and handled by fleet. Otherwise it is run until cancelled or a // fatal error. 
func (pb *packetbeat) Run(b *beat.Beat) error { @@ -138,11 +162,28 @@ func (pb *packetbeat) Run(b *beat.Beat) error { } if !b.Manager.Enabled() { + if b.Config.Output.Name() == "elasticsearch" { + _, err := elasticsearch.RegisterConnectCallback(func(esClient *eslegclient.Connection) error { + _, err := module.UploadPipelines(b.Info, esClient, pb.overwritePipelines) + return err + }) + if err != nil { + return err + } + } else { + logp.L().Warn(pipelinesWarning) + } + return pb.runStatic(b, pb.factory) } return pb.runManaged(b, pb.factory) } +const pipelinesWarning = "Packetbeat is unable to load the ingest pipelines for the configured" + + " modules because the Elasticsearch output is not configured/enabled. If you have" + + " already loaded the ingest pipelines or are using Logstash pipelines, you" + + " can ignore this warning." + // runStatic constructs a packetbeat runner and starts it, returning on cancellation // or the first fatal error. func (pb *packetbeat) runStatic(b *beat.Beat, factory *processorFactory) error { diff --git a/packetbeat/config/config.go b/packetbeat/config/config.go index 13d00b89e44b..7d579af635bf 100644 --- a/packetbeat/config/config.go +++ b/packetbeat/config/config.go @@ -33,14 +33,15 @@ import ( var errFanoutGroupAFPacketOnly = errors.New("fanout_group is only valid with af_packet type") type Config struct { - Interface *InterfaceConfig `config:"interfaces"` - Interfaces []InterfaceConfig `config:"interfaces"` - Flows *Flows `config:"flows"` - Protocols map[string]*conf.C `config:"protocols"` - ProtocolsList []*conf.C `config:"protocols"` - Procs procs.ProcsConfig `config:"procs"` - IgnoreOutgoing bool `config:"ignore_outgoing"` - ShutdownTimeout time.Duration `config:"shutdown_timeout"` + Interface *InterfaceConfig `config:"interfaces"` + Interfaces []InterfaceConfig `config:"interfaces"` + Flows *Flows `config:"flows"` + Protocols map[string]*conf.C `config:"protocols"` + ProtocolsList []*conf.C `config:"protocols"` + Procs 
procs.ProcsConfig `config:"procs"` + IgnoreOutgoing bool `config:"ignore_outgoing"` + ShutdownTimeout time.Duration `config:"shutdown_timeout"` + OverwritePipelines bool `config:"overwrite_pipelines"` // Only used by standalone Packetbeat. } // FromStatic initializes a configuration given a config.C diff --git a/packetbeat/magefile.go b/packetbeat/magefile.go index 50c8a19310ca..00e4f9dd47ba 100644 --- a/packetbeat/magefile.go +++ b/packetbeat/magefile.go @@ -29,19 +29,20 @@ import ( "github.com/elastic/beats/v7/dev-tools/mage/target/build" packetbeat "github.com/elastic/beats/v7/packetbeat/scripts/mage" - // mage:import + //mage:import "github.com/elastic/beats/v7/dev-tools/mage/target/common" - // mage:import + //mage:import "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" - // mage:import + //mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/integtest/notests" - // mage:import + //mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/test" ) func init() { common.RegisterCheckDeps(Update) unittest.RegisterPythonTestDeps(packetbeat.FieldsYML, Dashboards) + packetbeat.SelectLogic = devtools.OSSProject devtools.BeatDescription = "Packetbeat analyzes network traffic and sends the data to Elasticsearch." } diff --git a/packetbeat/module/pipeline.go b/packetbeat/module/pipeline.go new file mode 100644 index 000000000000..9e6d23849386 --- /dev/null +++ b/packetbeat/module/pipeline.go @@ -0,0 +1,188 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package module + +import ( + "embed" + "encoding/json" + "errors" + "fmt" + "os" + "path" + "path/filepath" + "strings" + + "github.com/joeshaw/multierror" + "gopkg.in/yaml.v2" + + "github.com/elastic/beats/v7/filebeat/fileset" + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" + "github.com/elastic/elastic-agent-libs/logp" +) + +// PipelinesFS is used from the x-pack/packetbeat code to inject modules. The +// OSS version does not have modules. +var PipelinesFS *embed.FS + +var errNoFS = errors.New("no embedded file system") + +const logName = "pipeline" + +type pipeline struct { + id string + contents map[string]interface{} +} + +// UploadPipelines reads all pipelines embedded in the Packetbeat executable +// and adapts the pipeline for a given ES version, converts to JSON if +// necessary and creates or updates ingest pipeline in ES. The IDs of pipelines +// uploaded to ES are returned in loaded. +func UploadPipelines(info beat.Info, esClient *eslegclient.Connection, overwritePipelines bool) (loaded []string, err error) { + pipelines, err := readAll(info) + if err != nil { + return nil, err + } + return load(esClient, pipelines, overwritePipelines) +} + +// readAll reads pipelines from the the embedded filesystem and +// returns a slice of pipelines suitable for sending to Elasticsearch +// with load. +func readAll(info beat.Info) (pipelines []pipeline, err error) { + p, err := readDir(".", info) + if err == errNoFS { //nolint:errorlint // Bad linter! This is never wrapped. 
+ return nil, nil + } + return p, err +} + +func readDir(dir string, info beat.Info) (pipelines []pipeline, err error) { + if PipelinesFS == nil { + return nil, errNoFS + } + dirEntries, err := PipelinesFS.ReadDir(dir) + if err != nil { + return nil, err + } + for _, de := range dirEntries { + if de.IsDir() { + subPipelines, err := readDir(path.Join(dir, de.Name()), info) + if err != nil { + return nil, err + } + pipelines = append(pipelines, subPipelines...) + continue + } + p, err := readFile(path.Join(dir, de.Name()), info) + if err == errNoFS { //nolint:errorlint // Bad linter! This is never wrapped. + continue + } + if err != nil { + return nil, err + } + pipelines = append(pipelines, p) + } + return pipelines, nil +} + +func readFile(filename string, info beat.Info) (p pipeline, err error) { + if PipelinesFS == nil { + return pipeline{}, errNoFS + } + contents, err := PipelinesFS.ReadFile(filename) + if err != nil { + return pipeline{}, err + } + updatedContent, err := applyTemplates(info.IndexPrefix, info.Version, filename, contents) + if err != nil { + return pipeline{}, err + } + ds, _, _ := strings.Cut(filename, string(os.PathSeparator)) + p = pipeline{ + id: fileset.FormatPipelineID(info.IndexPrefix, "", "", ds, info.Version), + contents: updatedContent, + } + return p, nil +} + +// load uses esClient to load pipelines to Elasticsearch cluster. +// The IDs of loaded pipelines will be returned in loaded. +// load will only overwrite existing pipelines if overwritePipelines is +// true. An error in loading one of the pipelines will cause the +// successfully loaded ones to be deleted. 
+func load(esClient *eslegclient.Connection, pipelines []pipeline, overwritePipelines bool) (loaded []string, err error) { + log := logp.NewLogger(logName) + + for _, pipeline := range pipelines { + err = fileset.LoadPipeline(esClient, pipeline.id, pipeline.contents, overwritePipelines, log) + if err != nil { + err = fmt.Errorf("error loading pipeline %s: %w", pipeline.id, err) + break + } + loaded = append(loaded, pipeline.id) + } + + if err != nil { + errs := multierror.Errors{err} + for _, id := range loaded { + err = fileset.DeletePipeline(esClient, id) + if err != nil { + errs = append(errs, err) + } + } + return nil, errs.Err() + } + return loaded, nil +} + +func applyTemplates(prefix string, version string, filename string, original []byte) (converted map[string]interface{}, err error) { + vars := map[string]interface{}{ + "builtin": map[string]interface{}{ + "prefix": prefix, + "module": "", + "fileset": "", + "beatVersion": version, + }, + } + + encodedString, err := fileset.ApplyTemplate(vars, string(original), true) + if err != nil { + return nil, fmt.Errorf("failed to apply template: %w", err) + } + + var content map[string]interface{} + switch extension := strings.ToLower(filepath.Ext(filename)); extension { + case ".json": + if err = json.Unmarshal([]byte(encodedString), &content); err != nil { + return nil, fmt.Errorf("error JSON decoding the pipeline file: %s: %w", filename, err) + } + case ".yaml", ".yml": + if err = yaml.Unmarshal([]byte(encodedString), &content); err != nil { + return nil, fmt.Errorf("error YAML decoding the pipeline file: %s: %w", filename, err) + } + newContent, err := fileset.FixYAMLMaps(content) + if err != nil { + return nil, fmt.Errorf("failed to sanitize the YAML pipeline file: %s: %w", filename, err) + } + content = newContent.(map[string]interface{}) + default: + return nil, fmt.Errorf("unsupported extension '%s' for pipeline file: %s", extension, filename) + } + return content, nil +} diff --git 
a/packetbeat/packetbeat.reference.yml b/packetbeat/packetbeat.reference.yml index 1e013fb081f5..c9dac77048ad 100644 --- a/packetbeat/packetbeat.reference.yml +++ b/packetbeat/packetbeat.reference.yml @@ -78,6 +78,11 @@ packetbeat.interfaces.internal_networks: # can stay enabled even after beat is shut down. #packetbeat.interfaces.auto_promisc_mode: true +# By default Ingest pipelines are not updated if a pipeline with the same ID +# already exists. If this option is enabled Packetbeat overwrites pipelines +# every time a new Elasticsearch connection is established. +#packetbeat.overwrite_pipelines: false + # =================================== Flows ==================================== packetbeat.flows: diff --git a/packetbeat/scripts/mage/config.go b/packetbeat/scripts/mage/config.go index 5213f4f1f87c..f41b50ffff75 100644 --- a/packetbeat/scripts/mage/config.go +++ b/packetbeat/scripts/mage/config.go @@ -30,11 +30,18 @@ func device(goos string) string { return "default_route" } +// SelectLogic configures the types of project logic to use (OSS vs X-Pack). +// It is set in the packetbeat and x-pack/packetbeat magefiles. +var SelectLogic devtools.ProjectType + // ConfigFileParams returns the default ConfigFileParams for generating // packetbeat*.yml files. 
func ConfigFileParams() devtools.ConfigFileParams { p := devtools.DefaultConfigFileParams() p.Templates = append(p.Templates, devtools.OSSBeatDir("_meta/config/*.tmpl")) + if SelectLogic == devtools.XPackProject { + p.Templates = append(p.Templates, devtools.XPackBeatDir("_meta/config/*.tmpl")) + } p.ExtraVars = map[string]interface{}{ "device": device, } diff --git a/x-pack/packetbeat/_meta/config/output-elasticsearch.yml.tmpl b/x-pack/packetbeat/_meta/config/output-elasticsearch.yml.tmpl new file mode 100644 index 000000000000..ffb3bc696fc2 --- /dev/null +++ b/x-pack/packetbeat/_meta/config/output-elasticsearch.yml.tmpl @@ -0,0 +1,15 @@ +{{subheader "Elasticsearch Output"}} +output.elasticsearch: + # Array of hosts to connect to. + hosts: ["localhost:9200"] + + # Protocol - either `http` (default) or `https`. + #protocol: "https" + + # Authentication credentials - either API key or username/password. + #api_key: "id:api_key" + #username: "elastic" + #password: "changeme" + + # Pipeline to route events to protocol pipelines. + pipeline: "packetbeat-%{[agent.version]}-routing" diff --git a/x-pack/packetbeat/cmd/root.go b/x-pack/packetbeat/cmd/root.go index f77bd827bf22..8611fe8d1150 100644 --- a/x-pack/packetbeat/cmd/root.go +++ b/x-pack/packetbeat/cmd/root.go @@ -21,6 +21,9 @@ import ( // This registers the Npcap installer on Windows. _ "github.com/elastic/beats/v7/x-pack/packetbeat/npcap" + + // Enable pipelines. + _ "github.com/elastic/beats/v7/x-pack/packetbeat/module" ) // Name of this beat. diff --git a/x-pack/packetbeat/magefile.go b/x-pack/packetbeat/magefile.go index acef21538d62..03104ab9157e 100644 --- a/x-pack/packetbeat/magefile.go +++ b/x-pack/packetbeat/magefile.go @@ -47,6 +47,7 @@ func init() { devtools.BeatDescription = "Packetbeat analyzes network traffic and sends the data to Elasticsearch." devtools.BeatLicense = "Elastic License" + packetbeat.SelectLogic = devtools.XPackProject } // Update updates the generated files. 
diff --git a/x-pack/packetbeat/module/amqp/ingest/default.yml b/x-pack/packetbeat/module/amqp/ingest/default.yml new file mode 100644 index 000000000000..7b2268f48129 --- /dev/null +++ b/x-pack/packetbeat/module/amqp/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing amqp traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + tag: gsubmac + ignore_missing: true +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + tag: gsubmac + ignore_missing: true +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreachip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipelineprocessor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/amqp/ingest/geoip.yml b/x-pack/packetbeat/module/amqp/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/amqp/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP 
enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: 
true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/cassandra/ingest/default.yml b/x-pack/packetbeat/module/cassandra/ingest/default.yml new file mode 100644 index 000000000000..61ce5ff4d736 --- /dev/null +++ b/x-pack/packetbeat/module/cassandra/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing cassandra traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsubmac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsubmac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreachip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipelineprocessor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ 
_ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/cassandra/ingest/geoip.yml b/x-pack/packetbeat/module/cassandra/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/cassandra/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - 
geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/dhcpv4/ingest/default.yml b/x-pack/packetbeat/module/dhcpv4/ingest/default.yml new file mode 100644 index 000000000000..1c3a2a572644 --- /dev/null +++ b/x-pack/packetbeat/module/dhcpv4/ingest/default.yml @@ -0,0 +1,74 @@ +--- +description: Pipeline for processing dhcpv4 traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: dhcpv4.client_mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_dhcpv4_client_mac +- gsub: + field: dhcpv4.client_mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_dhcpv4_client_mac +- uppercase: + field: dhcpv4.client_mac + ignore_missing: true +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip 
instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/dhcpv4/ingest/geoip.yml b/x-pack/packetbeat/module/dhcpv4/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/dhcpv4/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. 
+processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + 
+on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/dns/ingest/default.yml b/x-pack/packetbeat/module/dns/ingest/default.yml new file mode 100644 index 000000000000..ff055c3c9b37 --- /dev/null +++ b/x-pack/packetbeat/module/dns/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing dhcpv4 traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ 
_ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/dns/ingest/geoip.yml b/x-pack/packetbeat/module/dns/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/dns/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + 
database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/flow/ingest/default.yml b/x-pack/packetbeat/module/flow/ingest/default.yml new file mode 100644 index 000000000000..6e969ea1a61e --- /dev/null +++ b/x-pack/packetbeat/module/flow/ingest/default.yml @@ -0,0 +1,89 @@ +--- +description: Pipeline for processing traffic flows +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set {host,source,destination}.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + tag: foreach_observer_ip + field: observer.ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host +- gsub: + field: source.mac + pattern: '[-:.]' + 
replacement: '' + ignore_missing: true + tag: gsub_source_mac +- gsub: + field: source.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_source_mac +- uppercase: + field: source.mac + ignore_missing: true +- gsub: + field: destination.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_destination_mac +- gsub: + field: destination.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_destination_mac +- uppercase: + field: destination.mac + ignore_missing: true + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/flow/ingest/geoip.yml b/x-pack/packetbeat/module/flow/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/flow/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. 
+processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + 
+on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/http/ingest/default.yml b/x-pack/packetbeat/module/http/ingest/default.yml new file mode 100644 index 000000000000..e066200becb5 --- /dev/null +++ b/x-pack/packetbeat/module/http/ingest/default.yml @@ -0,0 +1,72 @@ +--- +description: Pipeline for processing http traffic +processors: +- set: + field: ecs.version + value: '8.11.0' + +# Detection Rules compatibility +- set: + tag: set_compatibility_request_authorization + field: network_traffic.http.request.headers.authorization + copy_from: http.request.headers.authorization + ignore_empty_value: true +- set: + tag: set_compatibility_response_type + field: http.response.mime_type + copy_from: http.response.headers.content-type + ignore_empty_value: true + +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + tag: foreach_observer_ip + field: observer.ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && 
ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/http/ingest/geoip.yml b/x-pack/packetbeat/module/http/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/http/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - 
organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/icmp/ingest/default.yml b/x-pack/packetbeat/module/icmp/ingest/default.yml new file mode 100644 index 000000000000..7a50bb91cc56 --- /dev/null +++ b/x-pack/packetbeat/module/icmp/ingest/default.yml @@ -0,0 +1,66 @@ +--- +description: Pipeline for processing icmp traffic +processors: +- set: + field: ecs.version + value: '8.11.0' + +# Detection Rules compatibility +- set: + tag: set_compatibility_type + field: network.protocol + copy_from: type + +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: 
ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + tag: foreach_observer_ip + field: observer.ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/icmp/ingest/geoip.yml b/x-pack/packetbeat/module/icmp/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/icmp/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. 
+processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + 
+on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/memcached/ingest/default.yml b/x-pack/packetbeat/module/memcached/ingest/default.yml new file mode 100644 index 000000000000..d0f5f18088c1 --- /dev/null +++ b/x-pack/packetbeat/module/memcached/ingest/default.yml @@ -0,0 +1,79 @@ +--- +description: Pipeline for processing memcached traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + tag: foreach_observer_ip + field: observer.ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +## +# Reformat memcache stats response data as a single object +## +- rename: + field: memcache.response.stats + target_field: memcache.response.stats_objects + ignore_missing: true +- foreach: + description: Build an object for 
memcache stats response data + if: ctx.memcache?.response?.stats_objects instanceof List + tag: foreach_memcache_response_stats_objects + field: memcache.response.stats_objects + processor: + set: + field: "memcache.response.stats.{{{_ingest._value.name}}}" + value: "{{{_ingest._value.value}}}" +- remove: + field: memcache.response.stats_objects + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/memcached/ingest/geoip.yml b/x-pack/packetbeat/module/memcached/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/memcached/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + 
ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/mongodb/ingest/default.yml b/x-pack/packetbeat/module/mongodb/ingest/default.yml new file mode 100644 index 000000000000..a40e27da35d7 --- /dev/null +++ b/x-pack/packetbeat/module/mongodb/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing mongodb traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: 
gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/mongodb/ingest/geoip.yml b/x-pack/packetbeat/module/mongodb/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/mongodb/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. 
+processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + 
+on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/mysql/ingest/default.yml b/x-pack/packetbeat/module/mysql/ingest/default.yml new file mode 100644 index 000000000000..e9cb2ebcdb06 --- /dev/null +++ b/x-pack/packetbeat/module/mysql/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing mysql traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ 
_ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/mysql/ingest/geoip.yml b/x-pack/packetbeat/module/mysql/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/mysql/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + 
database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/nfs/ingest/default.yml b/x-pack/packetbeat/module/nfs/ingest/default.yml new file mode 100644 index 000000000000..a1b72a252179 --- /dev/null +++ b/x-pack/packetbeat/module/nfs/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing nfs traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + 
name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/nfs/ingest/geoip.yml b/x-pack/packetbeat/module/nfs/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/nfs/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: 
true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/pgsql/ingest/default.yml b/x-pack/packetbeat/module/pgsql/ingest/default.yml new file mode 100644 index 000000000000..bd28f9211e1f --- /dev/null +++ b/x-pack/packetbeat/module/pgsql/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing pgsql traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip 
instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/pgsql/ingest/geoip.yml b/x-pack/packetbeat/module/pgsql/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/pgsql/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. 
+processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + 
+on_failure:
+  - append:
+      field: error.message
+      value: |-
+        Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}"
+  - set:
+      field: event.kind
+      value: pipeline_error
diff --git a/x-pack/packetbeat/module/pipeline.go b/x-pack/packetbeat/module/pipeline.go
new file mode 100644
index 000000000000..a325fba7de4f
--- /dev/null
+++ b/x-pack/packetbeat/module/pipeline.go
@@ -0,0 +1,20 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package module
+
+import (
+	"embed"
+
+	"github.com/elastic/beats/v7/packetbeat/module"
+)
+
+// pipelinesFS holds the yml representation of the ingest node pipelines
+//
+//go:embed */ingest/*.yml
+var pipelinesFS embed.FS
+
+func init() {
+	module.PipelinesFS = &pipelinesFS
+}
diff --git a/x-pack/packetbeat/module/redis/ingest/default.yml b/x-pack/packetbeat/module/redis/ingest/default.yml
new file mode 100644
index 000000000000..4f815adc3a90
--- /dev/null
+++ b/x-pack/packetbeat/module/redis/ingest/default.yml
@@ -0,0 +1,59 @@
+---
+description: Pipeline for processing redis traffic
+processors:
+- set:
+    field: ecs.version
+    value: '8.11.0'
+##
+# Set host.mac to dash separated upper case value
+# as per ECS recommendation
+##
+- gsub:
+    field: host.mac
+    pattern: '[-:.]'
+    replacement: ''
+    ignore_missing: true
+    tag: gsub_host_mac
+- gsub:
+    field: host.mac
+    pattern: '(..)(?!$)'
+    replacement: '$1-'
+    ignore_missing: true
+    tag: gsub_host_mac
+- uppercase:
+    field: host.mac
+    ignore_missing: true
+- append:
+    field: related.hosts
+    value: "{{{observer.hostname}}}"
+    if: ctx.observer?.hostname != null && ctx.observer?.hostname != ''
+    allow_duplicates: false
+- foreach:
+    if: ctx.observer?.ip != 
null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/redis/ingest/geoip.yml b/x-pack/packetbeat/module/redis/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/redis/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. 
+processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + 
+on_failure:
+  - append:
+      field: error.message
+      value: |-
+        Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}"
+  - set:
+      field: event.kind
+      value: pipeline_error
diff --git a/x-pack/packetbeat/module/routing/ingest/default.yml b/x-pack/packetbeat/module/routing/ingest/default.yml
new file mode 100644
index 000000000000..a11b5e79f7ab
--- /dev/null
+++ b/x-pack/packetbeat/module/routing/ingest/default.yml
@@ -0,0 +1,64 @@
+---
+description: Route to appropriate data source pipeline.
+processors:
+  - set:
+      field: event.ingested
+      value: '{{_ingest.timestamp}}'
+
+  - pipeline:
+      if: ctx.type == "amqp"
+      name: '{< IngestPipeline "amqp" >}'
+  - pipeline:
+      if: ctx.type == "cassandra"
+      name: '{< IngestPipeline "cassandra" >}'
+  - pipeline:
+      if: ctx.type == "dhcpv4"
+      name: '{< IngestPipeline "dhcpv4" >}'
+  - pipeline:
+      if: ctx.type == "dns"
+      name: '{< IngestPipeline "dns" >}'
+  - pipeline:
+      if: ctx.type == "flow"
+      name: '{< IngestPipeline "flow" >}'
+  - pipeline:
+      if: ctx.type == "http"
+      name: '{< IngestPipeline "http" >}'
+  - pipeline:
+      if: ctx.type == "icmp"
+      name: '{< IngestPipeline "icmp" >}'
+  - pipeline:
+      if: ctx.type == "memcache"
+      name: '{< IngestPipeline "memcached" >}'
+  - pipeline:
+      if: ctx.type == "mongodb"
+      name: '{< IngestPipeline "mongodb" >}'
+  - pipeline:
+      if: ctx.type == "mysql"
+      name: '{< IngestPipeline "mysql" >}'
+  - pipeline:
+      if: ctx.type == "nfs"
+      name: '{< IngestPipeline "nfs" >}'
+  - pipeline:
+      if: ctx.type == "pgsql"
+      name: '{< IngestPipeline "pgsql" >}'
+  - pipeline:
+      if: ctx.type == "redis"
+      name: '{< IngestPipeline "redis" >}'
+  - pipeline:
+      if: ctx.type == "sip"
+      name: '{< IngestPipeline "sip" >}'
+  - pipeline:
+      if: ctx.type == "thrift"
+      name: '{< IngestPipeline "thrift" >}'
+  - pipeline:
+      if: ctx.type == "tls"
+      name: '{< IngestPipeline "tls" >}'
+
+on_failure: + - set: + field: event.kind + value: pipeline_error + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" diff --git a/x-pack/packetbeat/module/sip/ingest/default.yml b/x-pack/packetbeat/module/sip/ingest/default.yml new file mode 100644 index 000000000000..62f3d6c1c424 --- /dev/null +++ b/x-pack/packetbeat/module/sip/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing sip traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ 
_ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/sip/ingest/geoip.yml b/x-pack/packetbeat/module/sip/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/sip/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + 
database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/thrift/ingest/default.yml b/x-pack/packetbeat/module/thrift/ingest/default.yml new file mode 100644 index 000000000000..f2726cea96b6 --- /dev/null +++ b/x-pack/packetbeat/module/thrift/ingest/default.yml @@ -0,0 +1,59 @@ +--- +description: Pipeline for processing thrift traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && 
ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/thrift/ingest/geoip.yml b/x-pack/packetbeat/module/thrift/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/thrift/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - 
organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/tls/ingest/default.yml b/x-pack/packetbeat/module/tls/ingest/default.yml new file mode 100644 index 000000000000..94ef3b55d224 --- /dev/null +++ b/x-pack/packetbeat/module/tls/ingest/default.yml @@ -0,0 +1,99 @@ +--- +description: Pipeline for processing tls traffic +processors: +- set: + field: ecs.version + value: '8.11.0' +## +# Set host.mac to dash separated upper case value +# as per ECS recommendation +## +- gsub: + field: host.mac + pattern: '[-:.]' + replacement: '' + ignore_missing: true + tag: gsub_host_mac +- gsub: + field: host.mac + pattern: '(..)(?!$)' + replacement: '$1-' + ignore_missing: true + tag: gsub_host_mac +- uppercase: + field: host.mac + ignore_missing: true +- append: + field: related.hosts + value: "{{{observer.hostname}}}" + if: ctx.observer?.hostname != null && ctx.observer?.hostname != '' + allow_duplicates: false +- foreach: + if: 
ctx.observer?.ip != null && ctx.observer.ip instanceof List + field: observer.ip + tag: foreach_observer_ip + processor: + append: + field: related.ip + value: '{{{_ingest._value}}}' + allow_duplicates: false +- remove: + if: ctx.host != null && ctx.tags != null && ctx.tags.contains('forwarded') + field: host + +- pipeline: + if: ctx._conf?.geoip_enrich != null && ctx._conf.geoip_enrich + name: '{{ IngestPipeline "geoip" }}' + tag: pipeline_processor +- remove: + field: _conf + ignore_missing: true + +## +# Make tls.{client,server}.x509.version_number a string as per ECS. +## +- convert: + field: tls.client.x509.version_number + type: string + ignore_missing: true + tag: convert_tls_client_x509_version_number +- convert: + field: tls.server.x509.version_number + type: string + ignore_missing: true + tag: convert_tls_server_x509_version_number + +## +# This handles legacy TLS fields from Packetbeat 7.17. +## +- remove: + description: Remove legacy fields from Packetbeat 7.17 that are duplicated. + field: + - tls.client.x509.issuer.province # Duplicated as tls.client.x509.issuer.state_or_province. + - tls.client.x509.subject.province # Duplicated as tls.client.x509.subject.state_or_province. + - tls.client.x509.version # Duplicated as tls.client.x509.version_number. + - tls.detailed.client_certificate # Duplicated as tls.client.x509. + - tls.detailed.server_certificate # Duplicated as tls.server.x509. + - tls.server.x509.issuer.province # Duplicated as tls.server.x509.issuer.state_or_province. + - tls.server.x509.subject.province # Duplicated as tls.server.x509.subject.state_or_province. + - tls.server.x509.version # Duplicated as tls.server.x509.version_number. 
+ ignore_missing: true + +- append: + field: related.hash + value: "{{tls.server.ja3s}}" + if: "ctx?.tls?.server?.ja3s != null" +- append: + field: related.hash + value: "{{tls.client.ja3}}" + if: "ctx?.tls?.client?.ja3 != null" + allow_duplicates: false + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/module/tls/ingest/geoip.yml b/x-pack/packetbeat/module/tls/ingest/geoip.yml new file mode 100644 index 000000000000..eb88d38caf0c --- /dev/null +++ b/x-pack/packetbeat/module/tls/ingest/geoip.yml @@ -0,0 +1,103 @@ +--- +description: GeoIP enrichment. +processors: + - geoip: + field: source.ip + target_field: source.geo + ignore_missing: true + tag: source_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: source.ip + target_field: source.as + properties: + - asn + - organization_name + ignore_missing: true + tag: source_geo + - rename: + field: source.as.asn + target_field: source.as.number + ignore_missing: true + - rename: + field: source.as.organization_name + target_field: source.as.organization.name + ignore_missing: true + + - geoip: + field: destination.ip + target_field: destination.geo + ignore_missing: true + tag: destination_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: destination.ip + target_field: destination.as + properties: + - asn + - organization_name + ignore_missing: true + tag: destination_geo + - rename: + field: destination.as.asn + target_field: destination.as.number + ignore_missing: true + - rename: + field: destination.as.organization_name + target_field: destination.as.organization.name + ignore_missing: true + + - geoip: + field: server.ip + target_field: server.geo + ignore_missing: true + tag: server_geo + - geoip: + 
database_file: GeoLite2-ASN.mmdb + field: server.ip + target_field: server.as + properties: + - asn + - organization_name + ignore_missing: true + tag: server_geo + - rename: + field: server.as.asn + target_field: server.as.number + ignore_missing: true + - rename: + field: server.as.organization_name + target_field: server.as.organization.name + ignore_missing: true + + - geoip: + field: client.ip + target_field: client.geo + ignore_missing: true + tag: client_geo + - geoip: + database_file: GeoLite2-ASN.mmdb + field: client.ip + target_field: client.as + properties: + - asn + - organization_name + ignore_missing: true + tag: client_geo + - rename: + field: client.as.asn + target_field: client.as.number + ignore_missing: true + - rename: + field: client.as.organization_name + target_field: client.as.organization.name + ignore_missing: true + +on_failure: + - append: + field: error.message + value: |- + Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}" + - set: + field: event.kind + value: pipeline_error diff --git a/x-pack/packetbeat/packetbeat.reference.yml b/x-pack/packetbeat/packetbeat.reference.yml index 1e013fb081f5..c9dac77048ad 100644 --- a/x-pack/packetbeat/packetbeat.reference.yml +++ b/x-pack/packetbeat/packetbeat.reference.yml @@ -78,6 +78,11 @@ packetbeat.interfaces.internal_networks: # can stay enabled even after beat is shut down. #packetbeat.interfaces.auto_promisc_mode: true +# By default Ingest pipelines are not updated if a pipeline with the same ID +# already exists. If this option is enabled Packetbeat overwrites pipelines +# every time a new Elasticsearch connection is established. 
+#packetbeat.overwrite_pipelines: false + # =================================== Flows ==================================== packetbeat.flows: diff --git a/x-pack/packetbeat/packetbeat.yml b/x-pack/packetbeat/packetbeat.yml index fea1a2fb1153..d78fb6a7ccd5 100644 --- a/x-pack/packetbeat/packetbeat.yml +++ b/x-pack/packetbeat/packetbeat.yml @@ -213,10 +213,6 @@ output.elasticsearch: # Array of hosts to connect to. hosts: ["localhost:9200"] - # Performance preset - one of "balanced", "throughput", "scale", - # "latency", or "custom". - preset: balanced - # Protocol - either `http` (default) or `https`. #protocol: "https" @@ -225,6 +221,9 @@ output.elasticsearch: #username: "elastic" #password: "changeme" + # Pipeline to route events to protocol pipelines. + pipeline: "packetbeat-%{[agent.version]}-routing" + # ------------------------------ Logstash Output ------------------------------- #output.logstash: # The Logstash hosts From 2840dac635f493d1cd0ac51b390ec32affa654ec Mon Sep 17 00:00:00 2001 From: Pavel Zorin Date: Wed, 31 Jan 2024 13:54:20 +0000 Subject: [PATCH 093/129] Move Elastic agent pipeline config to main (#37801) * Moved 7.17 elstic agent pipeline configuration to main branch * Fixed indentations * Fixed indentations --- .buildkite/pull-requests.json | 16 +++++++ .../pipeline.xpack.elastic-agent.yml | 6 +++ catalog-info.yaml | 42 +++++++++++++++++++ 3 files changed, 64 insertions(+) create mode 100644 .buildkite/xpack/elastic-agent/pipeline.xpack.elastic-agent.yml diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index cc8ff9ab7a52..20870794752a 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -127,6 +127,22 @@ "skip_target_branches": [ ], "skip_ci_on_only_changed": [ ], "always_require_ci_on_changed": ["^packetbeat/.*", ".buildkite/packetbeat/.*", "^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", "^testing/.*"] + }, + { + "enabled": true, + "pipelineSlug": "xpack-elastic-agent", + 
"allow_org_users": true, + "allowed_repo_permissions": ["admin", "write"], + "allowed_list": [ ], + "set_commit_status": true, + "build_on_commit": true, + "build_on_comment": true, + "trigger_comment_regex": "^/test elastic-agent$", + "always_trigger_comment_regex": "^/test elastic-agent$", + "skip_ci_labels": [ ], + "skip_target_branches": [ ], + "skip_ci_on_only_changed": ["^xpack/elastic-agent/README.md", "^xpack/elastic-agent/docs/.*", "^xpack/elastic-agent/devtools/.*" ], + "always_require_ci_on_changed": ["^xpack/elastic-agent/.*", ".buildkite/xpack/elastic-agent/.*", "^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", "^testing/.*"] } ] } diff --git a/.buildkite/xpack/elastic-agent/pipeline.xpack.elastic-agent.yml b/.buildkite/xpack/elastic-agent/pipeline.xpack.elastic-agent.yml new file mode 100644 index 000000000000..58d61a367a4a --- /dev/null +++ b/.buildkite/xpack/elastic-agent/pipeline.xpack.elastic-agent.yml @@ -0,0 +1,6 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json + +# This pipeline is only for 7.17 branch. See catalog-info.yml +steps: + - label: "Example test" + command: echo "Hello!" 
diff --git a/catalog-info.yaml b/catalog-info.yaml index 4d6c956f1f3f..c5679e1215ce 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -377,3 +377,45 @@ spec: access_level: MANAGE_BUILD_AND_READ everyone: access_level: READ_ONLY + +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-beats-xpack-elastic-agent + description: "Beats xpack elastic agent" + links: + - title: Pipeline + url: https://buildkite.com/elastic/beats-xpack-elastic-agent + +spec: + type: buildkite-pipeline + owner: group:ingest-fp + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: beats-xpack-elastic-agent + description: "Beats xpack elastic agent pipeline" + spec: + branch_configuration: "7.17" + pipeline_file: ".buildkite/xpack/elastic-agent/pipeline.xpack.elastic-agent.yml" + provider_settings: + build_pull_request_forks: false + build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot + build_tags: true + filter_enabled: true + filter_condition: >- + build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null) + repository: elastic/beats + cancel_intermediate_builds: true + cancel_intermediate_builds_branch_filter: "!main !7.17 !8.*" + skip_intermediate_builds: true + skip_intermediate_builds_branch_filter: "!main !7.17 !8.*" + teams: + ingest-fp: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: READ_ONLY \ No newline at end of file From 5d1c59247f7b49606b49b43aac694780901bb338 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emilio=20Alvarez=20Pi=C3=B1eiro?= <95703246+emilioalvap@users.noreply.github.com> Date: Wed, 31 Jan 2024 15:41:02 +0100 Subject: [PATCH 094/129] [Heartbeat] Remove 
containerized check from setuid logic (#37794) Removed isContainerized from setuid check, as it fails to detect containers running under cgroups v2 and prevents switching users when running as root. --- CHANGELOG.next.asciidoc | 1 + heartbeat/security/security.go | 10 +--------- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 5ba27260c3b7..405c5b6cb472 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -81,6 +81,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Heartbeat* - Fix panics when parsing dereferencing invalid parsed url. {pull}34702[34702] +- Fix setuid root when running under cgroups v2. {pull}37794[37794] *Metricbeat* diff --git a/heartbeat/security/security.go b/heartbeat/security/security.go index 20c0f5cc7d68..8e15102f7b8d 100644 --- a/heartbeat/security/security.go +++ b/heartbeat/security/security.go @@ -26,8 +26,6 @@ import ( "strconv" "syscall" - sysinfo "github.com/elastic/go-sysinfo" - "kernel.org/pub/linux/libs/security/libcap/cap" ) @@ -36,13 +34,7 @@ func init() { // In the context of a container, where users frequently run as root, we follow BEAT_SETUID_AS to setuid/gid // and add capabilities to make this actually run as a regular user. This also helps Node.js in synthetics, which // does not want to run as root. It's also just generally more secure. 
- sysInfo, err := sysinfo.Host() - isContainer := false - if err == nil && sysInfo.Info().Containerized != nil { - isContainer = *sysInfo.Info().Containerized - } - - if localUserName := os.Getenv("BEAT_SETUID_AS"); isContainer && localUserName != "" && syscall.Geteuid() == 0 { + if localUserName := os.Getenv("BEAT_SETUID_AS"); localUserName != "" && syscall.Geteuid() == 0 { err := setNodeProcAttr(localUserName) if err != nil { panic(err) From a6e5b04fada1d13e430206000b0f3c1f5ee39ce6 Mon Sep 17 00:00:00 2001 From: Andrew Kroh Date: Wed, 31 Jan 2024 12:06:20 -0500 Subject: [PATCH 095/129] aws - add credential caching for aws assume role sessions (#37788) Add caching so that AWS `AssumeRole` session credentials are not requested for every single request. Sessions are valid for 15m by default but without caching that does not matter. This will speed up requests for users of `role_arn` by removing the overhead of most STS (session token service) calls and stop users from hitting rate-limiting issues with the STS. Fixes #37787 --- CHANGELOG.next.asciidoc | 1 + x-pack/libbeat/common/aws/credentials.go | 17 ++++++++++++++++- .../docs/aws-credentials-config.asciidoc | 3 +++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 405c5b6cb472..30d7bc46755d 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -56,6 +56,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Support build of projects outside of beats directory {pull}36126[36126] - Support Elastic Agent control protocol chunking support {pull}37343[37343] - Upgrade elastic-agent-libs to v0.7.5. Removes obsolete "Treating the CommonName field on X.509 certificates as a host name..." deprecation warning for 8.0. {pull}37755[37755] +- aws: Add credential caching for `AssumeRole` session tokens. 
{issue}37787[37787] *Auditbeat* diff --git a/x-pack/libbeat/common/aws/credentials.go b/x-pack/libbeat/common/aws/credentials.go index 84e88d10422b..f6efde3e2b20 100644 --- a/x-pack/libbeat/common/aws/credentials.go +++ b/x-pack/libbeat/common/aws/credentials.go @@ -10,6 +10,7 @@ import ( "fmt" "net/http" "net/url" + "time" "github.com/aws/aws-sdk-go-v2/service/sts" @@ -44,6 +45,13 @@ type ConfigAWS struct { FIPSEnabled bool `config:"fips_enabled"` TLS *tlscommon.Config `config:"ssl" yaml:"ssl,omitempty" json:"ssl,omitempty"` DefaultRegion string `config:"default_region"` + + // The duration of the role session. Defaults to 15m when not set. + AssumeRoleDuration time.Duration `config:"assume_role.duration"` + + // AssumeRoleExpiryWindow will allow the credentials to trigger refreshing prior to the credentials + // actually expiring. If expiry_window is less than or equal to zero, the setting is ignored. + AssumeRoleExpiryWindow time.Duration `config:"assume_role.expiry_window"` } // InitializeAWSConfig function creates the awssdk.Config object from the provided config @@ -154,8 +162,15 @@ func addAssumeRoleProviderToAwsConfig(config ConfigAWS, awsConfig *awssdk.Config if config.ExternalID != "" { aro.ExternalID = awssdk.String(config.ExternalID) } + if config.AssumeRoleDuration > 0 { + aro.Duration = config.AssumeRoleDuration + } + }) + awsConfig.Credentials = awssdk.NewCredentialsCache(stsCredProvider, func(options *awssdk.CredentialsCacheOptions) { + if config.AssumeRoleExpiryWindow > 0 { + options.ExpiryWindow = config.AssumeRoleExpiryWindow + } }) - awsConfig.Credentials = stsCredProvider } // addStaticCredentialsProviderToAwsConfig adds a static credentials provider to the current AWS config by using the keys stored in Beats config diff --git a/x-pack/libbeat/docs/aws-credentials-config.asciidoc b/x-pack/libbeat/docs/aws-credentials-config.asciidoc index 172142d1aa82..423e241f8963 100644 --- a/x-pack/libbeat/docs/aws-credentials-config.asciidoc +++ 
b/x-pack/libbeat/docs/aws-credentials-config.asciidoc @@ -15,6 +15,9 @@ To configure AWS credentials, either put the credentials into the {beatname_uc} * *fips_enabled*: Enabling this option instructs {beatname_uc} to use the FIPS endpoint of a service. All services used by {beatname_uc} are FIPS compatible except for `tagging` but only certain regions are FIPS compatible. See https://aws.amazon.com/compliance/fips/ or the appropriate service page, https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html, for a full list of FIPS endpoints and regions. * *ssl*: This specifies SSL/TLS configuration. If the ssl section is missing, the host's CAs are used for HTTPS connections. See <> for more information. * *default_region*: Default region to query if no other region is set. Most AWS services offer a regional endpoint that can be used to make requests. Some services, such as IAM, do not support regions. If a region is not provided by any other way (environment variable, credential or instance profile), the value set here will be used. +* *assume_role.duration*: The duration of the requested assume role session. Defaults to 15m when not set. AWS allows a maximum session duration between 1h and 12h depending on your maximum session duration policies. +* *assume_role.expiry_window*: The expiry_window will allow refreshing the session prior to its expiration. + This is beneficial to prevent expiring tokens from causing requests to fail with an ExpiredTokenException. [float] ==== Supported Formats From 11f298c09be268bc590a4018c2433ad93ac7c2c1 Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Thu, 1 Feb 2024 09:33:43 +1030 Subject: [PATCH 096/129] mod: update github.com/lestrrat-go/jwx version (#37799) Addresses CVE-2023-49290 and CVE-2024-21664 risk. 
--- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 8 ++++---- go.mod | 4 ++-- go.sum | 16 ++++------------ 4 files changed, 11 insertions(+), 18 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 30d7bc46755d..c18010c5b86f 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -78,6 +78,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Added a fix for Crowdstrike pipeline handling process arrays {pull}36496[36496] - Fix m365_defender cursor value and query building. {pull}37116[37116] - Fix TCP/UDP metric queue length parsing base. {pull}37714[37714] +- Update github.com/lestrrat-go/jwx dependency. {pull}37799[37799] *Heartbeat* diff --git a/NOTICE.txt b/NOTICE.txt index c803ff33e8ea..7e0e27d091c7 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -20363,11 +20363,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/lestrrat-go/jwx/v2 -Version: v2.0.11 +Version: v2.0.19 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/lestrrat-go/jwx/v2@v2.0.11/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/lestrrat-go/jwx/v2@v2.0.19/LICENSE: The MIT License (MIT) @@ -45913,11 +45913,11 @@ Contents of probable licence file $GOMODCACHE/github.com/kylelemons/godebug@v1.1 -------------------------------------------------------------------------------- Dependency : github.com/lestrrat-go/blackmagic -Version: v1.0.1 +Version: v1.0.2 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/lestrrat-go/blackmagic@v1.0.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/lestrrat-go/blackmagic@v1.0.2/LICENSE: MIT License diff --git a/go.mod b/go.mod index a7044889fac4..e39b37f445eb 
100644 --- a/go.mod +++ b/go.mod @@ -213,7 +213,7 @@ require ( github.com/gorilla/handlers v1.5.1 github.com/gorilla/mux v1.8.0 github.com/icholy/digest v0.1.22 - github.com/lestrrat-go/jwx/v2 v2.0.11 + github.com/lestrrat-go/jwx/v2 v2.0.19 github.com/otiai10/copy v1.12.0 github.com/pierrec/lz4/v4 v4.1.18 github.com/pkg/xattr v0.4.9 @@ -321,7 +321,7 @@ require ( github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/kortschak/utter v1.5.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/lestrrat-go/blackmagic v1.0.1 // indirect + github.com/lestrrat-go/blackmagic v1.0.2 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect github.com/lestrrat-go/httprc v1.0.4 // indirect github.com/lestrrat-go/iter v1.0.2 // indirect diff --git a/go.sum b/go.sum index 79feea755704..e11edad3d976 100644 --- a/go.sum +++ b/go.sum @@ -572,7 +572,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892 h1:qg9VbHo1TlL0KDM0vYvBG9EY0X0Yku5WYIPoFWt8f6o= github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= -github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= @@ -1351,17 +1350,16 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 
h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lestrrat-go/blackmagic v1.0.1 h1:lS5Zts+5HIC/8og6cGHb0uCcNCa3OUt1ygh3Qz2Fe80= -github.com/lestrrat-go/blackmagic v1.0.1/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= +github.com/lestrrat-go/blackmagic v1.0.2 h1:Cg2gVSc9h7sz9NOByczrbUvLopQmXrfFx//N+AkAr5k= +github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= github.com/lestrrat-go/httprc v1.0.4 h1:bAZymwoZQb+Oq8MEbyipag7iSq6YIga8Wj6GOiJGdI8= github.com/lestrrat-go/httprc v1.0.4/go.mod h1:mwwz3JMTPBjHUkkDv/IGJ39aALInZLrhBp0X7KGUZlo= github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI= github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= -github.com/lestrrat-go/jwx/v2 v2.0.11 h1:ViHMnaMeaO0qV16RZWBHM7GTrAnX2aFLVKofc7FuKLQ= -github.com/lestrrat-go/jwx/v2 v2.0.11/go.mod h1:ZtPtMFlrfDrH2Y0iwfa3dRFn8VzwBrB+cyrm3IBWdDg= -github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= +github.com/lestrrat-go/jwx/v2 v2.0.19 h1:ekv1qEZE6BVct89QA+pRF6+4pCpfVrOnEJnTnT4RXoY= +github.com/lestrrat-go/jwx/v2 v2.0.19/go.mod h1:l3im3coce1lL2cDeAjqmaR+Awx+X8Ih+2k8BuHNJ4CU= github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -2043,7 +2041,6 @@ golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto 
v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2186,7 +2183,6 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2346,7 +2342,6 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2354,7 +2349,6 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -2365,7 +2359,6 @@ golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2381,7 +2374,6 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= 
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 31f564529c6d3575744a801d94945e6d7db77e39 Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Thu, 1 Feb 2024 09:35:44 +1030 Subject: [PATCH 097/129] x-pack/filebeat/docs/input: advise to use CEL over HTTPJSON for new projects (#37797) --- x-pack/filebeat/docs/inputs/input-httpjson.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc b/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc index 410edf9f9485..baa5767b91d3 100644 --- a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc @@ -11,6 +11,8 @@ Use the `httpjson` input to read messages from an HTTP API with JSON payloads. +If you are starting development of a new custom HTTP API input, we recommend that you use the <> which provides greater flexibility and an improved developer experience. 
+ This input supports: * Auth From 4ad9bb81e4b8e00e71ae5cacc39fe1b8c4b14bf6 Mon Sep 17 00:00:00 2001 From: Pavel Zorin Date: Wed, 31 Jan 2024 23:39:40 +0000 Subject: [PATCH 098/129] Fixed elastic-agent buildkite pipeline slug (#37806) --- .buildkite/pull-requests.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index 20870794752a..c1cfa299d079 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -130,7 +130,7 @@ }, { "enabled": true, - "pipelineSlug": "xpack-elastic-agent", + "pipelineSlug": "beats-xpack-elastic-agent", "allow_org_users": true, "allowed_repo_permissions": ["admin", "write"], "allowed_list": [ ], From 4cd62146346a9479c6cb0d8129615adf699da734 Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Thu, 1 Feb 2024 11:51:52 +1030 Subject: [PATCH 099/129] x-pack/filebeat/input/httpjson: add support for pem encoded keys (#37772) This adds a new Okta auth field, jwk_pem, that allows users to specify a PEM-encoded private key for authentication. Also refactor the JSON-based code to simplify and add minimal testing. --- CHANGELOG.next.asciidoc | 1 + .../docs/inputs/input-httpjson.asciidoc | 6 +- x-pack/filebeat/input/httpjson/config_auth.go | 24 +++- .../input/httpjson/config_okta_auth.go | 127 ++++++++++-------- .../input/httpjson/config_okta_auth_test.go | 88 ++++++++++++ x-pack/filebeat/input/httpjson/config_test.go | 2 +- 6 files changed, 191 insertions(+), 57 deletions(-) create mode 100644 x-pack/filebeat/input/httpjson/config_okta_auth_test.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index c18010c5b86f..579890f20293 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -177,6 +177,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - Update CEL extensions library to v1.8.0 to provide runtime error location reporting. 
{issue}37304[37304] {pull}37718[37718] - Add request trace logging for chained API requests. {issue}37551[36551] {pull}37682[37682] - Relax TCP/UDP metric polling expectations to improve metric collection. {pull}37714[37714] +- Add support for PEM-based Okta auth in HTTPJSON. {pull}37772[37772] *Auditbeat* diff --git a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc b/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc index baa5767b91d3..cc3594780e4c 100644 --- a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc @@ -401,8 +401,12 @@ NOTE: Only one of the credentials settings can be set at once. For more informat The RSA JWK Private Key JSON for your Okta Service App which is used for interacting with Okta Org Auth Server to mint tokens with okta.* scopes. -NOTE: Only one of the credentials settings can be set at once. For more information please refer to https://developer.okta.com/docs/guides/implement-oauth-for-okta-serviceapp/main/ +[float] +==== `auth.oauth2.okta.jwk_pem` +The RSA JWK private key PEM block for your Okta Service App which is used for interacting with Okta Org Auth Server to mint tokens with okta.* scopes. + +NOTE: Only one of the credentials settings can be set at once. 
For more information please refer to https://developer.okta.com/docs/guides/implement-oauth-for-okta-serviceapp/main/ [float] ==== `auth.oauth2.google.delegated_account` diff --git a/x-pack/filebeat/input/httpjson/config_auth.go b/x-pack/filebeat/input/httpjson/config_auth.go index 948948037770..d05592dfa500 100644 --- a/x-pack/filebeat/input/httpjson/config_auth.go +++ b/x-pack/filebeat/input/httpjson/config_auth.go @@ -6,6 +6,7 @@ package httpjson import ( "context" + "crypto/x509" "encoding/json" "errors" "fmt" @@ -104,6 +105,7 @@ type oAuth2Config struct { // okta specific RSA JWK private key OktaJWKFile string `config:"okta.jwk_file"` OktaJWKJSON common.JSONBlob `config:"okta.jwk_json"` + OktaJWKPEM string `config:"okta.jwk_pem"` } // IsEnabled returns true if the `enable` field is set to true in the yaml. @@ -289,8 +291,26 @@ func (o *oAuth2Config) validateGoogleProvider() error { } func (o *oAuth2Config) validateOktaProvider() error { - if o.TokenURL == "" || o.ClientID == "" || len(o.Scopes) == 0 || (o.OktaJWKJSON == nil && o.OktaJWKFile == "") { - return errors.New("okta validation error: token_url, client_id, scopes and at least one of okta.jwk_json or okta.jwk_file must be provided") + if o.TokenURL == "" || o.ClientID == "" || len(o.Scopes) == 0 { + return errors.New("okta validation error: token_url, client_id, scopes must be provided") + } + var n int + if o.OktaJWKJSON != nil { + n++ + } + if o.OktaJWKFile != "" { + n++ + } + if o.OktaJWKPEM != "" { + n++ + } + if n != 1 { + return errors.New("okta validation error: one of okta.jwk_json, okta.jwk_file or okta.jwk_pem must be provided") + } + // jwk_pem + if o.OktaJWKPEM != "" { + _, err := x509.ParsePKCS1PrivateKey([]byte(o.OktaJWKPEM)) + return err } // jwk_file if o.OktaJWKFile != "" { diff --git a/x-pack/filebeat/input/httpjson/config_okta_auth.go b/x-pack/filebeat/input/httpjson/config_okta_auth.go index 8bf2995d746a..c2b4289d9c91 100644 --- a/x-pack/filebeat/input/httpjson/config_okta_auth.go 
+++ b/x-pack/filebeat/input/httpjson/config_okta_auth.go @@ -5,10 +5,13 @@ package httpjson import ( + "bytes" "context" "crypto/rsa" + "crypto/x509" "encoding/base64" "encoding/json" + "encoding/pem" "fmt" "math/big" "net/http" @@ -43,9 +46,20 @@ func (o *oAuth2Config) fetchOktaOauthClient(ctx context.Context, _ *http.Client) }, } - oktaJWT, err := generateOktaJWT(o.OktaJWKJSON, conf) - if err != nil { - return nil, fmt.Errorf("oauth2 client: error generating Okta JWT: %w", err) + var ( + oktaJWT string + err error + ) + if len(o.OktaJWKPEM) != 0 { + oktaJWT, err = generateOktaJWTPEM(o.OktaJWKPEM, conf) + if err != nil { + return nil, fmt.Errorf("oauth2 client: error generating Okta JWT PEM: %w", err) + } + } else { + oktaJWT, err = generateOktaJWT(o.OktaJWKJSON, conf) + if err != nil { + return nil, fmt.Errorf("oauth2 client: error generating Okta JWT: %w", err) + } } token, err := exchangeForBearerToken(ctx, oktaJWT, conf) @@ -85,70 +99,78 @@ func (ts *oktaTokenSource) Token() (*oauth2.Token, error) { } func generateOktaJWT(oktaJWK []byte, cnf *oauth2.Config) (string, error) { - // unmarshal the JWK into a map - var jwkData map[string]string + // Unmarshal the JWK into big ints. + var jwkData struct { + N base64int `json:"n"` + E base64int `json:"e"` + D base64int `json:"d"` + P base64int `json:"p"` + Q base64int `json:"q"` + Dp base64int `json:"dp"` + Dq base64int `json:"dq"` + Qinv base64int `json:"qi"` + } err := json.Unmarshal(oktaJWK, &jwkData) if err != nil { return "", fmt.Errorf("error decoding JWK: %w", err) } - // create an RSA private key from JWK components - decodeBase64 := func(key string) (*big.Int, error) { - data, err := base64.RawURLEncoding.DecodeString(jwkData[key]) - if err != nil { - return nil, fmt.Errorf("error decoding RSA JWK component %s: %w", key, err) - } - return new(big.Int).SetBytes(data), nil + // Create an RSA private key from JWK components. 
+ key := &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: &jwkData.N.Int, + E: int(jwkData.E.Int64()), + }, + D: &jwkData.D.Int, + Primes: []*big.Int{&jwkData.P.Int, &jwkData.Q.Int}, + Precomputed: rsa.PrecomputedValues{ + Dp: &jwkData.Dp.Int, + Dq: &jwkData.Dq.Int, + Qinv: &jwkData.Qinv.Int, + }, } - n, err := decodeBase64("n") - if err != nil { - return "", err - } - e, err := decodeBase64("e") - if err != nil { - return "", err - } - d, err := decodeBase64("d") - if err != nil { - return "", err - } - p, err := decodeBase64("p") - if err != nil { - return "", err + return signJWT(cnf, key) + +} + +// base64int is a JSON decoding shim for base64-encoded big.Int. +type base64int struct { + big.Int +} + +func (i *base64int) UnmarshalJSON(b []byte) error { + src, ok := bytes.CutPrefix(b, []byte{'"'}) + if !ok { + return fmt.Errorf("invalid JSON type: %s", b) } - q, err := decodeBase64("q") - if err != nil { - return "", err + src, ok = bytes.CutSuffix(src, []byte{'"'}) + if !ok { + return fmt.Errorf("invalid JSON type: %s", b) } - dp, err := decodeBase64("dp") + dst := make([]byte, base64.RawURLEncoding.DecodedLen(len(src))) + _, err := base64.RawURLEncoding.Decode(dst, src) if err != nil { - return "", err + return err } - dq, err := decodeBase64("dq") - if err != nil { - return "", err + i.SetBytes(dst) + return nil +} + +func generateOktaJWTPEM(pemdata string, cnf *oauth2.Config) (string, error) { + blk, rest := pem.Decode([]byte(pemdata)) + if rest := bytes.TrimSpace(rest); len(rest) != 0 { + return "", fmt.Errorf("PEM text has trailing data: %s", rest) } - qi, err := decodeBase64("qi") + key, err := x509.ParsePKCS8PrivateKey(blk.Bytes) if err != nil { return "", err } + return signJWT(cnf, key) +} - privateKeyRSA := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: n, - E: int(e.Int64()), - }, - D: d, - Primes: []*big.Int{p, q}, - Precomputed: rsa.PrecomputedValues{ - Dp: dp, - Dq: dq, - Qinv: qi, - }, - } - - // create a JWT token using required claims and 
sign it with the private key +// signJWT creates a JWT token using required claims and sign it with the private key. +func signJWT(cnf *oauth2.Config, key any) (string, error) { now := time.Now() tok, err := jwt.NewBuilder().Audience([]string{cnf.Endpoint.TokenURL}). Issuer(cnf.ClientID). @@ -159,11 +181,10 @@ func generateOktaJWT(oktaJWK []byte, cnf *oauth2.Config) (string, error) { if err != nil { return "", err } - signedToken, err := jwt.Sign(tok, jwt.WithKey(jwa.RS256, privateKeyRSA)) + signedToken, err := jwt.Sign(tok, jwt.WithKey(jwa.RS256, key)) if err != nil { return "", fmt.Errorf("failed to sign token: %w", err) } - return string(signedToken), nil } diff --git a/x-pack/filebeat/input/httpjson/config_okta_auth_test.go b/x-pack/filebeat/input/httpjson/config_okta_auth_test.go new file mode 100644 index 000000000000..2f686af04373 --- /dev/null +++ b/x-pack/filebeat/input/httpjson/config_okta_auth_test.go @@ -0,0 +1,88 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package httpjson + +import ( + "testing" + + "github.com/lestrrat-go/jwx/v2/jwt" + "golang.org/x/oauth2" +) + +func TestGenerateOktaJWT(t *testing.T) { + // jwt is a JWT obtained from the Okta integration. 
+ const jwtText = `{ "d": "Cmhokw2MnZfX6da36nnsnQ7IPX9vE6se8_D1NgyL9j9rarYpexhlp45hswcAIFNgWA03NV848Gc0e84AW6wMbyD2E8LPI0Bd8lhdmzRE6L4or2Rxqqjk2Pr2aqGnqs4A0uTijAA7MfPF1zFFdR3EOVx499fEeTiMcLjO83IJCoNiOySDoQgt3KofX5bCbaDy2eiB83rzf0fEcWrWfTY65_Hc2c5lek-1uuF7NpELVzX80p5H-b9MOfLn0BdOGe-mJ2j5bXi-UCQ45Wxj2jdkoA_Qwb4MEtXZjp5LjcM75SrlGfVd99acML2wGZgYLGweJ0sAPDlKzGvj4ve-JT8nNw", "p": "8-UBb4psN0wRPktkh3S48L3ng4T5zR08t7nwXDYNajROrS2j7oq60dtlGY4IwgwcC0c9GDQP7NiN2IpU2uahYkGQ7lDyM_h7UfQWL5fMrsYiKgn2pUgSy5TTT8smkSLbJAD35nAH6PknsQ2PuvOlb4laiC0MXw1Rw4vT9HAEB9M", "q": "0DJkPEN0bECG_6lorlNJgIfoNahVevGKK-Yti1YZ5K-nQCuffPCwPG0oZZo_55y5LODe9W7psxnAt7wxkpAY4lK2hpHTWJSkPjqXWFYIP8trn4RZDShnJXli0i1XqPOqkiVzBZGx5nLtj2bUtmXfIU7-kneHGvLQ5EXcyQW1ISM", "dp": "Ye1PWEPSE5ndSo_m-2RoZXE6pdocmrjkijiEQ-IIHN6HwI0Ux1C4lk5rF4mqBo_qKrUd2Lv-sPB6c7mHPKVhoxwEX0vtE-TvTwacadufeYVgblS1zcNUmJ1XAzDkeV3vc1NYNhRBeM-hmjuBvGTbxh72VLsRvpCQhd186yaW17U", "dq": "jvSK7vZCUrJb_-CLCGgX6DFpuK5FQ43mmg4K58nPLb-Oz_kkId4CpPsu6dToXFi4raAad9wYi-n68i4-u6xF6eFxgyVOQVyPCkug7_7i2ysKUxXFL8u2R3z55edMca4eSQt91y0bQmlXxUeOd0-rzms3UcrQ8igYVyXBXCaXIJE", "qi": "iIY1Y4bzMYIFG7XH7gNP7C-mWi6QH4l9aGRTzPB_gPaFThvc0XKW0S0l82bfp_PPPWg4D4QpDCp7rZ6KhEA8BlNi86Vt3V6F3Hz5XiDa4ikgQNsAXiXLqf83R-y1-cwHjW70PP3U89hmalCRRFfVXcLHV77AVHqbrp9rAIo-X-I", "kty": "RSA", "e": "AQAB", "kid": "koeFQjkyiav_3Qwr3aRinCqCD2LaEHOjFnje7XlkbdI", "n": "xloTY8bAuI5AEo8JursCd7w0LmELCae7JOFaVo9njGrG8tRNqgIdjPyoGY_ABwKkmjcCMLGMA29llFDbry8rB4LTWai-h_jX4_uUUnl52mLX-lO6merL5HEPZF438Ql9Hrxs5yGzT8n865-E_3uwYSBrhTjvlZJeXYUeVHfKo8pJSSsw3RZEjBW4Tt0eFmCZnFErtTyk3oUPaYVP-8YLLAenhUDV4Lm1dC4dxqUj0Oh6XrWgIb-eYHGolMY9g9xbgyd4ir39RodA_1DOjzHWpNfCM-J5ZOtfpuKCAe5__u7L8FT0m56XOxcDoVVsz1J1VNrACWAGbhDWNjyHfL5E2Q" }` + cnf := &oauth2.Config{ + ClientID: "0oaajljpeokFZLyKU5d7", + Scopes: []string{"okta.logs.read"}, + } + got, err := generateOktaJWT([]byte(jwtText), cnf) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + tok, err := jwt.Parse([]byte(got), jwt.WithVerify(false)) + if err != 
nil { + t.Fatalf("unexpected error: %v", err) + } + if tok.Issuer() != cnf.ClientID { + t.Errorf("unexpected issuer: got:%s want:%s", tok.Issuer(), cnf.ClientID) + } + if tok.Subject() != cnf.ClientID { + t.Errorf("unexpected subject: got:%s want:%s", tok.Subject(), cnf.ClientID) + } +} + +func TestGenerateOktaJWTPEM(t *testing.T) { + // jwtText is generated by https://mkjwk.org/ using the instructions at + // https://developer.okta.com/docs/guides/dpop/nonoktaresourceserver/main/#create-the-json-web-token + const jwtText = ` +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCOuef3HMRhohVT +5kSoAJgV+atpDjkwTwkOq+ImnbBlv75GaApG90w8VpjXjhqN/1KJmwfyrKiquiMq +OPu+o/672Dys5rUAaWSbT7wRF1GjLDDZrM0GHRdV4DGxM/LKI8I5yE1Mx3EzV+D5 +ZLmcRc5U4oEoMwtGpr0zRZ7uUr6a28UQwcUsVIPItc1/9rERlo1WTv8dcaj4ECC3 +2Sc0y/F+9XqwJvLd4Uv6ckzP0Sv4tbDA+7jpD9MneAIUiZ4LVj2cwbBd+YRY6jXx +MkevcCSmSX60clBY1cIFkw1DYHqtdHEwAQcQHLGMoi72xRP2qrdzIPsaTKVYoHVo +WA9vADdHAgMBAAECggEAIlx7jjCsztyYyeQsL05FTzUWoWo9NnYwtgmHnshkCXsK +MiUmJEOxZO1sSqj5l6oakupyFWigCspZYPbrFNCiqVK7+NxqQzkccY/WtT6p9uDS +ufUyPwCN96zMCd952lSVlBe3FH8Hr9a+YQxw60CbFjCZ67WuR0opTsi6JKJjJSDb +TQQZ4qJR97D05I1TgfmO+VO7G/0/dDaNHnnlYz0AnOgZPSyvrU2G5cYye4842EMB +ng81xjHD+xp55JNui/xYkhmYspYhrB2KlEjkKb08OInUjBeaLEAgA1r9yOHsfV/3 +DQzDPRO9iuqx5BfJhdIqUB1aifrye+sbxt9uMBtUgQKBgQDVdfO3GYT+ZycOQG9P +QtdMn6uiSddchVCGFpk331u6M6yafCKjI/MlJDl29B+8R5sVsttwo8/qnV/xd3cn +pY14HpKAsE4l6/Ciagzoj+0NqfPEDhEzbo8CyArcd7pSxt3XxECAfZe2+xivEPHe +gFO60vSFjFtvlLRMDMOmqX3kYQKBgQCrK1DISyQTnD6/axsgh2/ESOmT7n+JRMx/ +YzA7Lxu3zGzUC8/sRDa1C41t054nf5ZXJueYLDSc4kEAPddzISuCLxFiTD2FQ75P +lHWMgsEzQObDm4GPE9cdKOjoAvtAJwbvZcjDa029CDx7aCaDzbNvdmplZ7EUrznR +55U8Wsm8pwKBgBytxTmzZwfbCgdDJvFKNKzpwuCB9TpL+v6Y6Kr2Clfg+26iAPFU +MiWqUUInGGBuamqm5g6jI5sM28gQWeTsvC4IRXyes1Eq+uCHSQax15J/Y+3SSgNT +9kjUYYkvWMwoRcPobRYWSZze7XkP2L8hFJ7EGvAaZGqAWxzgliS9HtnhAoGAONZ/ +UqMw7Zoac/Ga5mhSwrj7ZvXxP6Gqzjofj+eKqrOlB5yMhIX6LJATfH6iq7cAMxxm 
+Fu/G4Ll4oB3o5wACtI3wldV/MDtYfJBtoCTjBqPsfNOsZ9hMvBATlsc2qwzKjsAb +tFhzTevoOYpSD75EcSS/G8Ec2iN9bagatBnpl00CgYBVqAOFZelNfP7dj//lpk8y +EUAw7ABOq0S9wkpFWTXIVPoBQUipm3iAUqGNPmvr/9ShdZC9xeu5AwKram4caMWJ +ExRhcDP1hFM6CdmSkIYEgBKvN9N0O4Lx1ba34gk74Hm65KXxokjJHOC0plO7c7ok +LNV/bIgMHOMoxiGrwyjAhg== +-----END PRIVATE KEY----- +` + cnf := &oauth2.Config{ + ClientID: "0oaajljpeokFZLyKU5d7", + Scopes: []string{"okta.logs.read"}, + } + got, err := generateOktaJWTPEM(jwtText, cnf) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + tok, err := jwt.Parse([]byte(got), jwt.WithVerify(false)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if tok.Issuer() != cnf.ClientID { + t.Errorf("unexpected issuer: got:%s want:%s", tok.Issuer(), cnf.ClientID) + } + if tok.Subject() != cnf.ClientID { + t.Errorf("unexpected subject: got:%s want:%s", tok.Subject(), cnf.ClientID) + } +} diff --git a/x-pack/filebeat/input/httpjson/config_test.go b/x-pack/filebeat/input/httpjson/config_test.go index 74e72ded3323..d88c6ac4a625 100644 --- a/x-pack/filebeat/input/httpjson/config_test.go +++ b/x-pack/filebeat/input/httpjson/config_test.go @@ -464,7 +464,7 @@ func TestConfigOauth2Validation(t *testing.T) { }, { name: "okta requires token_url, client_id, scopes and at least one of okta.jwk_json or okta.jwk_file to be provided", - expectedErr: "okta validation error: token_url, client_id, scopes and at least one of okta.jwk_json or okta.jwk_file must be provided accessing 'auth.oauth2'", + expectedErr: "okta validation error: one of okta.jwk_json, okta.jwk_file or okta.jwk_pem must be provided accessing 'auth.oauth2'", input: map[string]interface{}{ "auth.oauth2": map[string]interface{}{ "provider": "okta", From 842c77c95641708c06b2acbbab2be3c088baf7a8 Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Thu, 1 Feb 2024 19:18:18 +1030 Subject: [PATCH 100/129] packetbeat/docs: add documentation for ingest pipelines (#37798) Also fix typo in 
winlogbeat load-ingest-pipelines doc. --- packetbeat/docs/howto/howto.asciidoc | 2 + .../docs/howto/load-ingest-pipelines.asciidoc | 28 +++++++++++++ packetbeat/docs/modules.asciidoc | 41 +++++++++++++++++++ packetbeat/docs/packetbeat-options.asciidoc | 9 ++++ .../docs/howto/load-ingest-pipelines.asciidoc | 2 +- 5 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 packetbeat/docs/howto/load-ingest-pipelines.asciidoc create mode 100644 packetbeat/docs/modules.asciidoc diff --git a/packetbeat/docs/howto/howto.asciidoc b/packetbeat/docs/howto/howto.asciidoc index cdadf3cb7b35..b7284ab3024b 100644 --- a/packetbeat/docs/howto/howto.asciidoc +++ b/packetbeat/docs/howto/howto.asciidoc @@ -23,6 +23,8 @@ include::{libbeat-dir}/howto/load-dashboards.asciidoc[] include::{libbeat-dir}/shared-geoip.asciidoc[] +include::load-ingest-pipelines.asciidoc[] + :standalone: include::{libbeat-dir}/shared-env-vars.asciidoc[] :standalone!: diff --git a/packetbeat/docs/howto/load-ingest-pipelines.asciidoc b/packetbeat/docs/howto/load-ingest-pipelines.asciidoc new file mode 100644 index 000000000000..acca824829c5 --- /dev/null +++ b/packetbeat/docs/howto/load-ingest-pipelines.asciidoc @@ -0,0 +1,28 @@ +[[load-ingest-pipelines]] +== Load ingest pipelines + +{beatname_uc} modules are implemented using {es} ingest node +pipelines. The events receive their transformations within +{es}. The ingest node pipelines must be loaded +into {es}. This can happen one of several ways. + +[id="{beatname_lc}-load-pipeline-auto"] +[float] +=== On connection to {es} + +{beatname_uc} will send ingest pipelines automatically to {es} if the +{es} output is enabled. + +Make sure the user specified in +{beatname_lc}.yml+ is +<>. + +If {beatname_uc} is sending events to {ls} or another output you need +to load the ingest pipelines with the `setup` command or manually. 
+ +[id="{beatname_lc}-load-pipeline-manual"] +[float] +=== Manually install pipelines + +Pipelines can be loaded them into {es} with the `_ingest/pipeline` REST API +call. The user making the REST API call will need to have the `ingest_admin` +role assigned to them. diff --git a/packetbeat/docs/modules.asciidoc b/packetbeat/docs/modules.asciidoc new file mode 100644 index 000000000000..8e72454f9cff --- /dev/null +++ b/packetbeat/docs/modules.asciidoc @@ -0,0 +1,41 @@ +[id="{beatname_lc}-modules"] +[role="xpack"] += Modules + +[partintro] +-- +This section contains detailed information about the available network packet +log processing modules contained in {beatname_uc}. + +{beatname_uc} modules are implemented using Elasticsearch Ingest Node pipelines. +The events receive their transformations within Elasticsearch. All events are +sent through {beatname_uc}'s "routing" pipeline that routes events to specific +module pipelines based on their network protocol. + +{beatname_uc}'s default config file contains the option to send all events to +the routing pipeline. If you remove this option then the module processing +will not be applied. + +[source,yaml,subs="attributes"] +---- +output.elasticsearch.pipeline: packetbeat-%{[agent.version]}-routing +---- + +The general goal of each module is to transform events by renaming fields to +comply with the {ecs-ref}/index.html[Elastic Common Schema] (ECS). The modules +may also apply additional categorization, tagging, and parsing as necessary. +about how to configure the language in `packetbeat`, refer to <>. + +[id="{beatname_lc}-modules-setup"] +[float] +=== Setup of Ingest Node pipelines + +{beatname_uc}'s Ingest Node pipelines must be installed to Elasticsearch if you +want to apply the module processing to events. The simplest way to get started +is to use the Elasticsearch output and {beatname_uc} will automatically install +the pipelines when it first connects to Elasticsearch. + +Installation Methods + +1. 
<<{beatname_lc}-load-pipeline-auto>> +2. <<{beatname_lc}-load-pipeline-manual>> diff --git a/packetbeat/docs/packetbeat-options.asciidoc b/packetbeat/docs/packetbeat-options.asciidoc index c5cb4d95d6b8..c48b4a1b01d0 100644 --- a/packetbeat/docs/packetbeat-options.asciidoc +++ b/packetbeat/docs/packetbeat-options.asciidoc @@ -1650,3 +1650,12 @@ Example configuration: ------------------------------------------------------------------------------------- packetbeat.shutdown_timeout: 5s ------------------------------------------------------------------------------------- + +[float] +==== `overwrite_pipelines` + +By default Ingest pipelines are not updated if a pipeline with the same ID +already exists. If this option is enabled {beatname_uc} overwrites pipelines +every time a new Elasticsearch connection is established. + +The default value is `false`. diff --git a/winlogbeat/docs/howto/load-ingest-pipelines.asciidoc b/winlogbeat/docs/howto/load-ingest-pipelines.asciidoc index fa795f0b6b2a..0d7f842249e1 100644 --- a/winlogbeat/docs/howto/load-ingest-pipelines.asciidoc +++ b/winlogbeat/docs/howto/load-ingest-pipelines.asciidoc @@ -24,7 +24,7 @@ to load the ingest pipelines with the `setup` command or manually. === setup command On a machine that has {beatname_uc} installed and has {es} configured -as the outup, run the `setup` command with the `--pipelines` option +as the output, run the `setup` command with the `--pipelines` option specified. For example, the following command loads the ingest pipelines: From eae57f95df941ea61a5b51a9c2253133c58cc8f2 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Thu, 1 Feb 2024 14:54:47 +0100 Subject: [PATCH 101/129] Lower logging level when unpacking config in autodiscover (#37816) When unpacking a config while applying the autodiscover config template, sometimes a configuration template cannot be resolved at that moment but will be resolved at a later stage. 
This commit brings the logging level of this error back to debug as it can become too verbose and does not always mean the config is invalid. --- CHANGELOG.next.asciidoc | 1 + libbeat/autodiscover/template/config.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 579890f20293..48d87b1dd86f 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -57,6 +57,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Support Elastic Agent control protocol chunking support {pull}37343[37343] - Upgrade elastic-agent-libs to v0.7.5. Removes obsolete "Treating the CommonName field on X.509 certificates as a host name..." deprecation warning for 8.0. {pull}37755[37755] - aws: Add credential caching for `AssumeRole` session tokens. {issue}37787[37787] +- Lower logging level to debug when attempting to configure beats with unknown fields from autodiscovered events/environments {pull}37816[37816] *Auditbeat* diff --git a/libbeat/autodiscover/template/config.go b/libbeat/autodiscover/template/config.go index 3ba0db210de9..c050ff8acd86 100644 --- a/libbeat/autodiscover/template/config.go +++ b/libbeat/autodiscover/template/config.go @@ -154,7 +154,7 @@ func ApplyConfigTemplate(event bus.Event, configs []*conf.C, options ...ucfg.Opt var unpacked map[string]interface{} err = c.Unpack(&unpacked, opts...) 
if err != nil { - logp.Warn("autodiscover: Configuration template cannot be resolved: %v", err) + logp.Debug("autodiscover", "Configuration template cannot be resolved: %v", err) continue } // Repack again: From 07c559b703fb6d52447fd22893add4ece8b982a9 Mon Sep 17 00:00:00 2001 From: Maximilian Stinsky <26960620+mstinsky@users.noreply.github.com> Date: Thu, 1 Feb 2024 18:13:07 +0100 Subject: [PATCH 102/129] Remove huaweicloud - revert #27607 (#35184) * Remove huaweicloud - revert #27607 The huaweicloud is just openstack therefore revert #27607 to fix detection of all public and private openstack installations. * make huawei an alias for openstack * change doc * Update CHANGELOG.next.asciidoc --------- Co-authored-by: kaiyan-sheng --- CHANGELOG.next.asciidoc | 4 + .../docs/add_cloud_metadata.asciidoc | 22 +---- .../provider_huawei_cloud.go | 81 --------------- .../provider_huawei_cloud_test.go | 98 ------------------- .../add_cloud_metadata/providers.go | 2 +- 5 files changed, 10 insertions(+), 197 deletions(-) delete mode 100644 libbeat/processors/add_cloud_metadata/provider_huawei_cloud.go delete mode 100644 libbeat/processors/add_cloud_metadata/provider_huawei_cloud_test.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 48d87b1dd86f..8d09997f774d 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -11,6 +11,10 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Affecting all Beats* - Upgrade to Go 1.21.6. Removes support for Windows 8.1. See https://tip.golang.org/doc/go1.21#windows. {pull}37615[37615] +- add_cloud_metadata processor: `huawei` provider is now treated as `openstack`. Huawei cloud runs on OpenStack +platform, and when viewed from a metadata API standpoint, it is impossible to differentiate it from OpenStack. 
If you +know that your deployments run on Huawei Cloud exclusively, and you wish to have `cloud.provider` value as `huawei`, +you can achieve this by overwriting the value using an `add_fields` processor. {pull}35184[35184] *Auditbeat* diff --git a/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc b/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc index 9e61cac2e8cf..322bf4bd7575 100644 --- a/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc +++ b/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc @@ -16,7 +16,10 @@ The following cloud providers are supported: - Google Compute Engine (GCE) - https://www.qcloud.com/?lang=en[Tencent Cloud] (QCloud) - Alibaba Cloud (ECS) -- Huawei Cloud (ECS) +- Huawei Cloud (ECS)footnote:[`huawei` is an alias for `openstack`. Huawei cloud runs on OpenStack platform, and when +viewed from a metadata API standpoint, it is impossible to differentiate it from OpenStack. If you know that your +deployments run on Huawei Cloud exclusively, and you wish to have `cloud.provider` value as `huawei`, you can achieve +this by overwriting the value using an `add_fields` processor.] - Azure Virtual Machine - Openstack Nova - Hetzner Cloud @@ -53,10 +56,9 @@ List of names the `providers` setting supports: - "digitalocean" for Digital Ocean (enabled by default). - "aws", or "ec2" for Amazon Web Services (enabled by default). - "gcp" for Google Copmute Enging (enabled by default). -- "openstack", or "nova" for Openstack Nova (enabled by default). +- "openstack", "nova", or "huawei" for Openstack Nova (enabled by default). - "openstack-ssl", or "nova-ssl" for Openstack Nova when SSL metadata APIs are enabled (enabled by default). - "tencent", or "qcloud" for Tencent Cloud (disabled by default). -- "huawei" for Huawei Cloud (enabled by default). - "hetzner" for Hetzner Cloud (enabled by default). The third optional configuration setting is `overwrite`. 
When `overwrite` is @@ -128,20 +130,6 @@ _Tencent Cloud_ } ------------------------------------------------------------------------------- -_Huawei Cloud_ - -[source,json] -------------------------------------------------------------------------------- -{ - "cloud": { - "availability_zone": "cn-east-2b", - "instance.id": "37da9890-8289-4c58-ba34-a8271c4a8216", - "provider": "huawei", - "region": "cn-east-2" - } -} -------------------------------------------------------------------------------- - _Alibaba Cloud_ This metadata is only available when VPC is selected as the network type of the diff --git a/libbeat/processors/add_cloud_metadata/provider_huawei_cloud.go b/libbeat/processors/add_cloud_metadata/provider_huawei_cloud.go deleted file mode 100644 index 36683e74a134..000000000000 --- a/libbeat/processors/add_cloud_metadata/provider_huawei_cloud.go +++ /dev/null @@ -1,81 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package add_cloud_metadata - -import ( - "encoding/json" - - conf "github.com/elastic/elastic-agent-libs/config" - "github.com/elastic/elastic-agent-libs/mapstr" -) - -type hwMeta struct { - ImageName string `json:"image_name"` - VpcID string `json:"vpc_id"` -} - -type hwMetadata struct { - UUID string `json:"uuid"` - AvailabilityZone string `json:"availability_zone"` - RegionID string `json:"region_id"` - Meta *hwMeta `json:"meta"` - ProjectID string `json:"project_id"` - Name string `json:"name"` -} - -// Huawei Cloud Metadata Service -// Document https://support.huaweicloud.com/usermanual-ecs/ecs_03_0166.html -var huaweiMetadataFetcher = provider{ - Name: "huawei-cloud", - - Local: true, - - Create: func(_ string, c *conf.C) (metadataFetcher, error) { - metadataHost := "169.254.169.254" - huaweiCloudMetadataJSONURI := "/openstack/latest/meta_data.json" - - huaweiCloudSchema := func(m map[string]interface{}) mapstr.M { - m["service"] = mapstr.M{ - "name": "ECS", - } - return mapstr.M{"cloud": m} - } - - urls, err := getMetadataURLs(c, metadataHost, []string{ - huaweiCloudMetadataJSONURI, - }) - if err != nil { - return nil, err - } - responseHandlers := map[string]responseHandler{ - urls[0]: func(all []byte, result *result) error { - data := new(hwMetadata) - err := json.Unmarshal(all, data) - if err != nil { - return err - } - result.metadata.Put("instance.id", data.UUID) - result.metadata.Put("region", data.RegionID) - result.metadata.Put("availability_zone", data.AvailabilityZone) - return nil - }, - } - fetcher := &httpMetadataFetcher{"huawei", nil, responseHandlers, huaweiCloudSchema} - return fetcher, nil - }, -} diff --git a/libbeat/processors/add_cloud_metadata/provider_huawei_cloud_test.go b/libbeat/processors/add_cloud_metadata/provider_huawei_cloud_test.go deleted file mode 100644 index 0ae6fc332f09..000000000000 --- a/libbeat/processors/add_cloud_metadata/provider_huawei_cloud_test.go +++ /dev/null @@ -1,98 +0,0 @@ -// Licensed to Elasticsearch 
B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package add_cloud_metadata - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/beats/v7/libbeat/beat" - conf "github.com/elastic/elastic-agent-libs/config" - "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent-libs/mapstr" -) - -func initHuaweiCloudTestServer() *httptest.Server { - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.RequestURI == "/openstack/latest/meta_data.json" { - w.Write([]byte(`{ - "random_seed": 
"CWIZtYK4y5pzMtShTtCKx16qB1DsA/2kL0US4u1fHxedODNr7gos4RgdE/z9eHucnltnlJfDY1remfGL60yzTsvEIWPdECOpPaJm1edIYQaUvQzdeQwKcOQAHjUP5wLQzGA3j3Pw10p7u+M7glHEwNRoEY1WsbVYwzyOOkBnqb+MJ1aOhiRnfNtHOxjLNBSDvjHaQZzoHL+1YNAxDYFezE83nE2m3ciVwZO7xWpdKDQ+W5hYBUsYAWODRMOYqIR/5ZLsfAfxE2DhK+NvuMyJ5yjO+ObQf0DN5nRUSrM5ajs84UVMr9ylJuT78ckh83CLSttsjzXJ+sr07ZFsB6/6NABzziFL7Xn8z/mEBVmFXBiBgg7KcWSoH756w42VSdUezwTy9lW0spRmdvNBKV/PzrYyy0FMiGXXZwMOCyBD05CBRJlsPorwxZLlfRVmNvsTuMYB8TG3UUbFhoR8Bd5en+EC3ncH3QIUDWn0oVg28BVjWe5rADVQLX1h83ti6GD08YUGaxoNPXnJLZfiaucSacby2mG31xysxd8Tg0qPRq7744a1HPVryuauWR9pF0+qDmtskhenxK0FR+TQ4w0fRxTigteBsXx1pQu0iz+B8rP68uokU2faCC2IMHY2Tf9RPCe6Eef0/DdQhBft88PuJLwq52o/0qZ/n9HFL6LdgCU=", - "uuid": "37da9890-8289-4c58-ba34-a8271c4a8216", - "availability_zone": "cn-east-2b", - "enterprise_project_id": "0", - "launch_index": 0, - "instance_type": "c3.large.2", - "meta": { - "os_bit": "64", - "image_name": "CentOS 7.4", - "vpc_id": "6dad7f50-db1d-4cce-b095-d27bc837d4bb" - }, - "region_id": "cn-east-2", - "project_id": "c09b8baf28b845a9b53ed37575cfd61f", - "name": "hwdev-test-1" - }`)) - return - } - - http.Error(w, "not found", http.StatusNotFound) - })) -} - -func TestRetrieveHuaweiCloudMetadata(t *testing.T) { - logp.TestingSetup() - - server := initHuaweiCloudTestServer() - defer server.Close() - - config, err := conf.NewConfigFrom(map[string]interface{}{ - "providers": []string{"huawei"}, - "host": server.Listener.Addr().String(), - }) - - if err != nil { - t.Fatal(err) - } - - p, err := New(config) - if err != nil { - t.Fatal(err) - } - - actual, err := p.Run(&beat.Event{Fields: mapstr.M{}}) - if err != nil { - t.Fatal(err) - } - - expected := mapstr.M{ - "cloud": mapstr.M{ - "provider": "huawei", - "instance": mapstr.M{ - "id": "37da9890-8289-4c58-ba34-a8271c4a8216", - }, - "region": "cn-east-2", - "availability_zone": "cn-east-2b", - "service": mapstr.M{ - "name": "ECS", - }, - }, - } - assert.Equal(t, expected, actual.Fields) -} diff --git 
a/libbeat/processors/add_cloud_metadata/providers.go b/libbeat/processors/add_cloud_metadata/providers.go index 55e68f756071..77c4c7042add 100644 --- a/libbeat/processors/add_cloud_metadata/providers.go +++ b/libbeat/processors/add_cloud_metadata/providers.go @@ -64,7 +64,7 @@ var cloudMetaProviders = map[string]provider{ "nova-ssl": openstackNovaSSLMetadataFetcher, "qcloud": qcloudMetadataFetcher, "tencent": qcloudMetadataFetcher, - "huawei": huaweiMetadataFetcher, + "huawei": openstackNovaMetadataFetcher, "hetzner": hetznerMetadataFetcher, } From fd5171b07efed70a039c58805c6383920ae784d9 Mon Sep 17 00:00:00 2001 From: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com> Date: Thu, 1 Feb 2024 14:09:04 -0500 Subject: [PATCH 103/129] Remove footnote to fix broken docs build (#37832) * Remove footnote to fix broken docs build * Typo --- .../docs/add_cloud_metadata.asciidoc | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc b/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc index 322bf4bd7575..c6dbdd5600a9 100644 --- a/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc +++ b/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc @@ -16,14 +16,16 @@ The following cloud providers are supported: - Google Compute Engine (GCE) - https://www.qcloud.com/?lang=en[Tencent Cloud] (QCloud) - Alibaba Cloud (ECS) -- Huawei Cloud (ECS)footnote:[`huawei` is an alias for `openstack`. Huawei cloud runs on OpenStack platform, and when -viewed from a metadata API standpoint, it is impossible to differentiate it from OpenStack. If you know that your -deployments run on Huawei Cloud exclusively, and you wish to have `cloud.provider` value as `huawei`, you can achieve -this by overwriting the value using an `add_fields` processor.] 
+- Huawei Cloud (ECS) - Azure Virtual Machine - Openstack Nova - Hetzner Cloud +NOTE: `huawei` is an alias for `openstack`. Huawei cloud runs on OpenStack platform, and when +viewed from a metadata API standpoint, it is impossible to differentiate it from OpenStack. If you know that your +deployments run on Huawei Cloud exclusively, and you wish to have `cloud.provider` value as `huawei`, you can achieve +this by overwriting the value using an `add_fields` processor. + The Alibaba Cloud and Tencent cloud providers are disabled by default, because they require to access a remote host. The `providers` setting allows users to select a list of default providers to query. From cb7ae7de15f22976c2d0e2fd10f743c0a73c7eb3 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 1 Feb 2024 16:36:57 -0500 Subject: [PATCH 104/129] chore: Update snapshot.yml (#37821) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index c1e25d376f66..977cfdd021fb 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-yil7wib0-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-por0bbe1-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-yil7wib0-SNAPSHOT + image: 
docker.elastic.co/logstash/logstash:8.13.0-por0bbe1-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-yil7wib0-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-por0bbe1-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 730dc87d0eb95e74ac1278fd9d6f425b61d73b91 Mon Sep 17 00:00:00 2001 From: Olga Naydyonock Date: Fri, 2 Feb 2024 14:10:13 +0200 Subject: [PATCH 105/129] Filebeat pipeline migration to Buildkite (#37283) * added test scripts * added windows tests * added packaging step * updated packaging execution conditions * added arm packaging * fixed linting in filbeat test_crawler.py * added platforms for linux packaging --------- Co-authored-by: Pavel Zorin --- .buildkite/env-scripts/env.sh | 13 ++ .buildkite/env-scripts/linux-env.sh | 24 +++ .buildkite/env-scripts/macos-env.sh | 8 + .buildkite/env-scripts/util.sh | 91 +++++++++++ .buildkite/env-scripts/win-env.sh | 8 + .buildkite/filebeat/filebeat-pipeline.yml | 141 +++++++++++++++++- .../filebeat/scripts/integration-gotests.sh | 12 ++ .../filebeat/scripts/integration-pytests.sh | 12 ++ .buildkite/filebeat/scripts/package-step.sh | 46 ++++++ .buildkite/filebeat/scripts/package.sh | 12 ++ .../filebeat/scripts/unit-tests-win.ps1 | 51 +++++++ .buildkite/filebeat/scripts/unit-tests.sh | 12 ++ .buildkite/hooks/pre-command | 18 +++ .buildkite/pull-requests.json | 6 +- catalog-info.yaml | 8 +- dev-tools/packaging/package_test.go | 33 ++-- filebeat/filebeat_windows_amd64.syso | Bin 0 -> 1072 bytes .../filestream/internal/task/group_test.go | 6 +- filebeat/tests/system/test_crawler.py | 22 ++- 19 files changed, 487 insertions(+), 36 deletions(-) create mode 100644 .buildkite/env-scripts/env.sh create mode 100644 .buildkite/env-scripts/linux-env.sh create mode 100644 .buildkite/env-scripts/macos-env.sh create mode 
100644 .buildkite/env-scripts/util.sh create mode 100644 .buildkite/env-scripts/win-env.sh create mode 100755 .buildkite/filebeat/scripts/integration-gotests.sh create mode 100755 .buildkite/filebeat/scripts/integration-pytests.sh create mode 100755 .buildkite/filebeat/scripts/package-step.sh create mode 100755 .buildkite/filebeat/scripts/package.sh create mode 100644 .buildkite/filebeat/scripts/unit-tests-win.ps1 create mode 100755 .buildkite/filebeat/scripts/unit-tests.sh create mode 100644 .buildkite/hooks/pre-command create mode 100644 filebeat/filebeat_windows_amd64.syso diff --git a/.buildkite/env-scripts/env.sh b/.buildkite/env-scripts/env.sh new file mode 100644 index 000000000000..d94d03aad53b --- /dev/null +++ b/.buildkite/env-scripts/env.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +SETUP_GVM_VERSION="v0.5.1" +WORKSPACE="$(pwd)" +BIN="${WORKSPACE}/bin" +HW_TYPE="$(uname -m)" +PLATFORM_TYPE="$(uname)" + +export SETUP_GVM_VERSION +export WORKSPACE +export BIN +export HW_TYPE +export PLATFORM_TYPE diff --git a/.buildkite/env-scripts/linux-env.sh b/.buildkite/env-scripts/linux-env.sh new file mode 100644 index 000000000000..edaf1a3100c2 --- /dev/null +++ b/.buildkite/env-scripts/linux-env.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +source .buildkite/env-scripts/util.sh + +DEBIAN_FRONTEND="noninteractive" + +export DEBIAN_FRONTEND + +sudo mkdir -p /etc/needrestart +echo "\$nrconf{restart} = 'a';" | sudo tee -a /etc/needrestart/needrestart.conf > /dev/null + +# Remove this code once beats specific agent is set up +if [[ $PLATFORM_TYPE == "Linux" ]]; then + echo ":: Installing libs ::" + sudo apt-get update + sudo apt-get install -y libsystemd-dev + sudo apt install -y python3-pip + sudo apt-get install -y python3-venv +fi + +echo ":: Setting up environment ::" +add_bin_path +with_go +with_mage diff --git a/.buildkite/env-scripts/macos-env.sh b/.buildkite/env-scripts/macos-env.sh new file mode 100644 index 000000000000..ac1486b64fdd --- /dev/null +++ 
b/.buildkite/env-scripts/macos-env.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +if [[ $PLATFORM_TYPE == Darwin* ]]; then + echo ":: Setting larger ulimit on MacOS ::" + # To bypass file descriptor errors like "Too many open files error" on MacOS + ulimit -Sn 50000 + echo ":: ULIMIT :: $(ulimit -n)" +fi diff --git a/.buildkite/env-scripts/util.sh b/.buildkite/env-scripts/util.sh new file mode 100644 index 000000000000..157a5aff37af --- /dev/null +++ b/.buildkite/env-scripts/util.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash + +set -euo pipefail + +add_bin_path() { + echo "Adding PATH to the environment variables..." + create_bin + export PATH="${PATH}:${BIN}" +} + +with_go() { + local go_version="${GOLANG_VERSION}" + echo "Setting up the Go environment..." + create_bin + check_platform_architecture + retry 5 curl -sL -o ${BIN}/gvm "https://github.com/andrewkroh/gvm/releases/download/${SETUP_GVM_VERSION}/gvm-${PLATFORM_TYPE}-${arch_type}" + export PATH="${PATH}:${BIN}" + chmod +x ${BIN}/gvm + eval "$(gvm "$go_version")" + go version + which go + export PATH="${PATH}:$(go env GOPATH):$(go env GOPATH)/bin" +} + +with_mage() { + local install_packages=( + "github.com/magefile/mage" + "github.com/elastic/go-licenser" + "golang.org/x/tools/cmd/goimports" + "github.com/jstemmer/go-junit-report" + "gotest.tools/gotestsum" + ) + create_bin + for pkg in "${install_packages[@]}"; do + go install "${pkg}@latest" + done +} + +create_bin() { + if [[ ! -d "${BIN}" ]]; then + mkdir -p ${BIN} + fi +} + +check_platform_architecture() { +# for downloading the GVM and Terraform packages + case "${HW_TYPE}" in + "x86_64") + arch_type="amd64" + ;; + "aarch64") + arch_type="arm64" + ;; + "arm64") + arch_type="arm64" + ;; + *) + echo "The current platform/OS type is unsupported yet" + ;; + esac +} + +retry() { + local retries=$1 + shift + local count=0 + until "$@"; do + exit=$? 
+ wait=$((2 ** count)) + count=$((count + 1)) + if [ $count -lt "$retries" ]; then + >&2 echo "Retry $count/$retries exited $exit, retrying in $wait seconds..." + sleep $wait + else + >&2 echo "Retry $count/$retries exited $exit, no more retries left." + return $exit + fi + done + return 0 +} + +are_files_changed() { + local changeset=$1 + + if git diff --name-only HEAD@{1} HEAD | grep -qE "$changeset"; then + return 0; + else + echo "WARN! No files changed in $changeset" + return 1; + fi +} diff --git a/.buildkite/env-scripts/win-env.sh b/.buildkite/env-scripts/win-env.sh new file mode 100644 index 000000000000..aa5f67ca4cee --- /dev/null +++ b/.buildkite/env-scripts/win-env.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +install_python_win() { + if [[ ${PLATFORM_TYPE} = MINGW* ]]; then + choco install mingw -y + choco install python --version=3.11.0 -y + fi +} diff --git a/.buildkite/filebeat/filebeat-pipeline.yml b/.buildkite/filebeat/filebeat-pipeline.yml index 34321b61161b..e3d7384a71ea 100644 --- a/.buildkite/filebeat/filebeat-pipeline.yml +++ b/.buildkite/filebeat/filebeat-pipeline.yml @@ -1,5 +1,142 @@ # yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json +env: + IMAGE_UBUNTU_X86_64: "family/core-ubuntu-2204" + IMAGE_UBUNTU_ARM_64: "core-ubuntu-2004-aarch64" + IMAGE_WIN_2016: "family/core-windows-2016" + IMAGE_WIN_2019: "family/core-windows-2019" + IMAGE_WIN_2022: "family/core-windows-2022" + IMAGE_MACOS_X86_64: "generic-13-ventura-x64" + steps: - - label: "Example test" - command: echo "Hello!" 
+ - group: "Filebeat Mandatory Testing" + key: "mandatory-tests" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") == "filebeat" || build.env("BUILDKITE_PULL_REQUEST") != "false" + + steps: + - label: ":ubuntu: Unit Tests" + command: + - ".buildkite/filebeat/scripts/unit-tests.sh" + notify: + - github_commit_status: + context: "Filebeat: Unit Tests" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "c2-standard-16" + artifact_paths: + - "filebeat/build/*.xml" + - "filebeat/build/*.json" + + - label: ":ubuntu: Go Integration Tests" + command: + - ".buildkite/filebeat/scripts/integration-gotests.sh" + notify: + - github_commit_status: + context: "Filebeat: Integration Tests" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "c2-standard-16" + artifact_paths: + - "filebeat/build/*.xml" + - "filebeat/build/*.json" + + - label: ":ubuntu: Python Integration Tests" + command: + - ".buildkite/filebeat/scripts/integration-pytests.sh" + notify: + - github_commit_status: + context: "Filebeat: Python Integration Tests" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "c2-standard-16" + artifact_paths: + - "filebeat/build/*.xml" + - "filebeat/build/*.json" + + - label: ":windows:-{{matrix.image}} Unit Tests" + command: ".buildkite/filebeat/scripts/unit-tests-win.ps1" + notify: + - github_commit_status: + context: "Filebeat: Unit Tests" + agents: + provider: "gcp" + image: "{{matrix.image}}" + machine_type: "n2-standard-8" + disk_size: 200 + disk_type: "pd-ssd" + matrix: + setup: + image: + - "${IMAGE_WIN_2016}" + - "${IMAGE_WIN_2022}" + artifact_paths: + - "filebeat/build/*.xml" + - "filebeat/build/*.json" + + - group: "Extended Testing" + key: "extended-tests" + if: build.env("BUILDKITE_PULL_REQUEST") != "false" || build.env("GITHUB_PR_TRIGGER_COMMENT") == "filebeat for extended support" + + steps: + - label: ":linux: ARM64 Unit Tests" + key: "arm-extended" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") 
== "filebeat for arm" || build.env("GITHUB_PR_LABELS") =~ /.*arm.*/ + command: + - ".buildkite/filebeat/scripts/unit-tests.sh" + notify: + - github_commit_status: + context: "Filebeat/Extended: Unit Tests ARM" + agents: + provider: "aws" + imagePrefix: "${IMAGE_UBUNTU_ARM_64}" + instanceType: "t4g.large" + artifact_paths: "filebeat/build/*.xml" + + - label: ":mac: MacOS Unit Tests" + key: "macos-extended" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") == "filebeat for macos" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ + command: + - ".buildkite/filebeat/scripts/unit-tests.sh" + notify: + - github_commit_status: + context: "Filebeat/Extended: MacOS Unit Tests" + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: "filebeat/build/*.xml" + + - group: "Windows Extended Testing" + key: "extended-tests-win" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") == "filebeat for windows" || build.env("GITHUB_PR_LABELS") =~ /.*windows.*/ + + steps: + - label: ":windows: Win 2019 Unit Tests" + key: "win-extended-2019" + command: ".buildkite/filebeat/scripts/unit-tests-win.ps1" + notify: + - github_commit_status: + context: "Filebeat/Extended: Win-2019 Unit Tests" + agents: + provider: "gcp" + image: "${IMAGE_WIN_2019}" + machine_type: "n2-standard-8" + disk_size: 200 + disk_type: "pd-ssd" + artifact_paths: + - "filebeat/build/*.xml" + - "filebeat/build/*.json" + + - group: "Packaging" + key: "packaging" + if: build.env("BUILDKITE_PULL_REQUEST") != "false" + depends_on: + - "mandatory-tests" + - "extended-tests" + - "extended-tests-win" + + steps: + - label: Package pipeline + commands: ".buildkite/filebeat/scripts/package-step.sh | buildkite-agent pipeline upload" diff --git a/.buildkite/filebeat/scripts/integration-gotests.sh b/.buildkite/filebeat/scripts/integration-gotests.sh new file mode 100755 index 000000000000..a3eabf70c0d3 --- /dev/null +++ b/.buildkite/filebeat/scripts/integration-gotests.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -euo 
pipefail + +source .buildkite/env-scripts/linux-env.sh + +echo ":: Execute Integration Tests ::" +sudo chmod -R go-w filebeat/ + +cd filebeat +umask 0022 +mage goIntegTest diff --git a/.buildkite/filebeat/scripts/integration-pytests.sh b/.buildkite/filebeat/scripts/integration-pytests.sh new file mode 100755 index 000000000000..5e2e403dda87 --- /dev/null +++ b/.buildkite/filebeat/scripts/integration-pytests.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/linux-env.sh + +echo ":: Execute Integration Tests ::" +sudo chmod -R go-w filebeat/ + +cd filebeat +umask 0022 +mage pythonIntegTest diff --git a/.buildkite/filebeat/scripts/package-step.sh b/.buildkite/filebeat/scripts/package-step.sh new file mode 100755 index 000000000000..a4127c3cd1d6 --- /dev/null +++ b/.buildkite/filebeat/scripts/package-step.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/util.sh + +changeset="^filebeat/ + ^go.mod + ^pytest.ini + ^dev-tools/ + ^libbeat/ + ^testing/ + ^\.buildkite/filebeat/" + +if are_files_changed "$changeset"; then + cat <<-EOF + steps: + - label: ":ubuntu: Packaging Linux X86" + key: "package-linux-x86" + env: + PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" + command: + - ".buildkite/filebeat/scripts/package.sh" + notify: + - github_commit_status: + context: "Filebeat/Packaging: Linux X86" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + + - label: ":linux: Packaging Linux ARM" + key: "package-linux-arm" + env: + PLATFORMS: "linux/arm64" + PACKAGES: "docker" + command: + - ".buildkite/filebeat/scripts/package.sh" + notify: + - github_commit_status: + context: "Filebeat/Packaging: ARM" + agents: + provider: "aws" + imagePrefix: "${IMAGE_UBUNTU_ARM_64}" + instanceType: "t4g.large" +EOF +fi diff --git a/.buildkite/filebeat/scripts/package.sh b/.buildkite/filebeat/scripts/package.sh new file mode 100755 index 
000000000000..2ae226eb739c --- /dev/null +++ b/.buildkite/filebeat/scripts/package.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/linux-env.sh + +echo ":: Evaluate Filebeat Changes ::" + +echo ":: Start Packaging ::" +cd filebeat +umask 0022 +mage package diff --git a/.buildkite/filebeat/scripts/unit-tests-win.ps1 b/.buildkite/filebeat/scripts/unit-tests-win.ps1 new file mode 100644 index 000000000000..8990eb30a093 --- /dev/null +++ b/.buildkite/filebeat/scripts/unit-tests-win.ps1 @@ -0,0 +1,51 @@ +$ErrorActionPreference = "Stop" # set -e +$GoVersion = $env:GOLANG_VERSION # If Choco doesn't have the version specified in .go-version file, should be changed manually + +# Forcing to checkout again all the files with a correct autocrlf. +# Doing this here because we cannot set git clone options before. +function fixCRLF() { + Write-Host "-- Fixing CRLF in git checkout --" + git config core.autocrlf false + git rm --quiet --cached -r . + git reset --quiet --hard +} + +function withGolang() { + Write-Host "-- Install golang $GoVersion --" + choco install golang -y --version $GoVersion + + $choco = Convert-Path "$((Get-Command choco).Path)\..\.." 
+ Import-Module "$choco\helpers\chocolateyProfile.psm1" + refreshenv + go version + go env +} + +function installGoDependencies() { + $installPackages = @( + "github.com/magefile/mage" + "github.com/elastic/go-licenser" + "golang.org/x/tools/cmd/goimports" + "github.com/jstemmer/go-junit-report" + "github.com/tebeka/go2xunit" + ) + foreach ($pkg in $installPackages) { + go install "$pkg" + } +} + +fixCRLF + +$ErrorActionPreference = "Continue" # set +e + +Set-Location -Path filebeat +New-Item -ItemType Directory -Force -Path "build" +withGolang +installGoDependencies + +mage build unitTest + +$EXITCODE=$LASTEXITCODE +$ErrorActionPreference = "Stop" + +Exit $EXITCODE diff --git a/.buildkite/filebeat/scripts/unit-tests.sh b/.buildkite/filebeat/scripts/unit-tests.sh new file mode 100755 index 000000000000..cda1dd85aea2 --- /dev/null +++ b/.buildkite/filebeat/scripts/unit-tests.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/linux-env.sh +source .buildkite/env-scripts/macos-env.sh + +echo ":: Execute Unit Tests ::" +sudo chmod -R go-w filebeat/ + +umask 0022 +mage -d filebeat unitTest diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command new file mode 100644 index 000000000000..0a1567e53cd5 --- /dev/null +++ b/.buildkite/hooks/pre-command @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/env.sh +source .buildkite/env-scripts/util.sh +source .buildkite/env-scripts/win-env.sh + + +if [[ "$BUILDKITE_PIPELINE_SLUG" == "filebeat" ]]; then + if [[ ${PLATFORM_TYPE} = MINGW* ]]; then + install_python_win + fi + + if [[ -z "${GOLANG_VERSION-""}" ]]; then + export GOLANG_VERSION=$(cat "${WORKSPACE}/.go-version") + fi +fi diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index c1cfa299d079..76fdfb3e6cdc 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -25,12 +25,12 @@ "set_commit_status": true, "build_on_commit": 
true, "build_on_comment": true, - "trigger_comment_regex": "^(?:(?:buildkite\\W+)?(?:build|test)\\W+(?:this|it))|^/test filebeat$", - "always_trigger_comment_regex": "^(?:(?:buildkite\\W+)?(?:build|test)\\W+(?:this|it))|^/test filebeat$", + "trigger_comment_regex": "^/test filebeat(for (arm|macos|windows|extended support))?$|^/packag[ing|e]$", + "always_trigger_comment_regex": "^/test filebeat(for (arm|macos|windows|extended support))?$|^/package filebeat$", "skip_ci_labels": [ ], "skip_target_branches": [ ], "skip_ci_on_only_changed": [ ], - "always_require_ci_on_changed": [ ] + "always_require_ci_on_changed": ["^filebeat/.*", ".buildkite/filebeat/.*", "^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", "^testing/.*" ] }, { "enabled": true, diff --git a/catalog-info.yaml b/catalog-info.yaml index c5679e1215ce..d145975ab7d3 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -130,9 +130,9 @@ spec: name: filebeat description: "Filebeat pipeline" spec: -# branch_configuration: "main 7.* 8.* v7.* v8.*" TODO: temporarily commented to build PRs from forks + branch_configuration: "main 7.* 8.* v7.* v8.*" pipeline_file: ".buildkite/filebeat/filebeat-pipeline.yml" -# maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline is ready + maximum_timeout_in_minutes: 120 provider_settings: build_pull_request_forks: false build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot @@ -145,8 +145,8 @@ spec: cancel_intermediate_builds_branch_filter: "!main !7.* !8.*" skip_intermediate_builds: true skip_intermediate_builds_branch_filter: "!main !7.* !8.*" - # env: - # ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is ready + env: + ELASTIC_PR_COMMENTS_ENABLED: "true" teams: ingest-fp: access_level: MANAGE_BUILD_AND_READ diff --git a/dev-tools/packaging/package_test.go b/dev-tools/packaging/package_test.go index e01b6c566e5a..fff920b429c2 100644 --- a/dev-tools/packaging/package_test.go 
+++ b/dev-tools/packaging/package_test.go @@ -714,9 +714,11 @@ func readZip(t *testing.T, zipFile string, inspectors ...inspector) (*packageFil } func readDocker(dockerFile string) (*packageFile, *dockerInfo, error) { - // Read the manifest file first so that the config file and layer - // names are known in advance. - manifest, err := getDockerManifest(dockerFile) + var manifest *dockerManifest + var info *dockerInfo + layers := make(map[string]*packageFile) + + manifest, err := readManifest(dockerFile) if err != nil { return nil, nil, err } @@ -727,9 +729,6 @@ func readDocker(dockerFile string) (*packageFile, *dockerInfo, error) { } defer file.Close() - var info *dockerInfo - layers := make(map[string]*packageFile) - gzipReader, err := gzip.NewReader(file) if err != nil { return nil, nil, err @@ -770,11 +769,7 @@ func readDocker(dockerFile string) (*packageFile, *dockerInfo, error) { // Read layers in order and for each file keep only the entry seen in the later layer p := &packageFile{Name: filepath.Base(dockerFile), Contents: map[string]packageEntry{}} - for _, layer := range manifest.Layers { - layerFile, found := layers[layer] - if !found { - return nil, nil, fmt.Errorf("layer not found: %s", layer) - } + for _, layerFile := range layers { for name, entry := range layerFile.Contents { // Check only files in working dir and entrypoint if strings.HasPrefix("/"+name, workingDir) || "/"+name == entrypoint { @@ -799,22 +794,21 @@ func readDocker(dockerFile string) (*packageFile, *dockerInfo, error) { return p, info, nil } -// getDockerManifest opens a gzipped tar file to read the Docker manifest.json -// that it is expected to contain. 
-func getDockerManifest(file string) (*dockerManifest, error) { - f, err := os.Open(file) +func readManifest(dockerFile string) (*dockerManifest, error) { + var manifest *dockerManifest + + file, err := os.Open(dockerFile) if err != nil { return nil, err } - defer f.Close() + defer file.Close() - gzipReader, err := gzip.NewReader(f) + gzipReader, err := gzip.NewReader(file) if err != nil { return nil, err } defer gzipReader.Close() - var manifest *dockerManifest tarReader := tar.NewReader(gzipReader) for { header, err := tarReader.Next() @@ -833,8 +827,7 @@ func getDockerManifest(file string) (*dockerManifest, error) { break } } - - return manifest, nil + return manifest, err } type dockerManifest struct { diff --git a/filebeat/filebeat_windows_amd64.syso b/filebeat/filebeat_windows_amd64.syso new file mode 100644 index 0000000000000000000000000000000000000000..c52af94f8e059275dff851e701e42fafefdf4132 GIT binary patch literal 1072 zcmZvcPiqrV6vfY&fOb*nQd|w_QbN*yQBY7!i-l0dN^vLCB#i?x6Ot*k(1j~MPnT}| z7B2f8`VGY2dDEAF_$Kq-efOPr?!D)|N&jaewmS_v}vGgN)sxb-p=70Pf@Vy80h4-2mv0c}8F;8`(uk;7{gmt73I%a-Ee~9h& zyMFiA*=*`jn8z_p?z^g{h3EExcburyRJq0)!j@~K0nB5as{r~dV47Neq)R-B-huhf zGXP&|2EKRi$(g^_w)WLlrWV!?U|^3TYmq!ORo4LK(Bx;jLh;di~0fGJ>biqB-LT?#j`|Hx0ei~6oN+*`lHFHx{AzxVk%8;6vk~%W>futpr>sxm z&0($bd;zY777dhe8|f+dF1R5~qi`Y<;<>^htR)zq(yA|A)9HDr#EQ9QXO-$LH>1nA zJ&)Dlb|u`mirY8F5uULXtKIz vU72-VI_yjMVytfDT-<8unfw8NX4Q9SMjbx&h0c!a@6oNM<$r-Bc)a@`Kf{eH literal 0 HcmV?d00001 diff --git a/filebeat/input/filestream/internal/task/group_test.go b/filebeat/input/filestream/internal/task/group_test.go index 553070e5ec73..5ce15d455e3e 100644 --- a/filebeat/input/filestream/internal/task/group_test.go +++ b/filebeat/input/filestream/internal/task/group_test.go @@ -241,12 +241,14 @@ func TestGroup_Go(t *testing.T) { want := uint64(2) g := NewGroup(want, time.Second, logger, "errorPrefix") - wg.Add(2) + wg.Add(1) err := g.Go(workload(1)) require.NoError(t, err) + wg.Wait() + + wg.Add(1) err = 
g.Go(workload(2)) require.NoError(t, err) - wg.Wait() err = g.Stop() diff --git a/filebeat/tests/system/test_crawler.py b/filebeat/tests/system/test_crawler.py index fba8debcaea6..2bea57223fe8 100644 --- a/filebeat/tests/system/test_crawler.py +++ b/filebeat/tests/system/test_crawler.py @@ -197,7 +197,10 @@ def test_file_renaming(self): # expecting 6 more events self.wait_until( - lambda: self.output_has(lines=iterations1 + iterations2), max_timeout=10) + lambda: self.output_has( + lines=iterations1 + + iterations2), + max_timeout=10) filebeat.check_kill_and_wait() @@ -247,7 +250,10 @@ def test_file_disappear(self): # Let it read the file self.wait_until( - lambda: self.output_has(lines=iterations1 + iterations2), max_timeout=10) + lambda: self.output_has( + lines=iterations1 + + iterations2), + max_timeout=10) filebeat.check_kill_and_wait() @@ -317,7 +323,10 @@ def test_file_disappear_appear(self): # Let it read the file self.wait_until( - lambda: self.output_has(lines=iterations1 + iterations2), max_timeout=10) + lambda: self.output_has( + lines=iterations1 + + iterations2), + max_timeout=10) filebeat.check_kill_and_wait() @@ -468,7 +477,8 @@ def test_tail_files(self): f.write("hello world 2\n") f.flush() - # Sleep 1 second to make sure the file is persisted on disk and timestamp is in the past + # Sleep 1 second to make sure the file is persisted on disk and + # timestamp is in the past time.sleep(1) filebeat = self.start_beat() @@ -569,6 +579,7 @@ def test_encodings(self): with codecs.open(self.working_dir + "/log/test-{}".format(enc_py), "w", enc_py) as f: f.write(text + "\n") + f.close() # create the config file inputs = [] @@ -592,10 +603,11 @@ def test_encodings(self): with codecs.open(self.working_dir + "/log/test-{}".format(enc_py), "a", enc_py) as f: f.write(text + " 2" + "\n") + f.close() # wait again self.wait_until(lambda: self.output_has(lines=len(encodings) * 2), - max_timeout=15) + max_timeout=60) filebeat.check_kill_and_wait() # check that all 
outputs are present in the JSONs in UTF-8 From aeb93a05c3f7c8e6b688352f4e82463cba4d7f91 Mon Sep 17 00:00:00 2001 From: Pavel Zorin Date: Fri, 2 Feb 2024 12:15:19 +0000 Subject: [PATCH 106/129] [CI] elastic agent 7.17. Renamed xpack dir to x-pack for consistency (#37830) * [CI] 7.17: renamed xpack dir to x-pack for consistency * Fixed indentations --- .buildkite/pull-requests.json | 4 ++-- .../elastic-agent/pipeline.xpack.elastic-agent.yml | 0 catalog-info.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) rename .buildkite/{xpack => x-pack}/elastic-agent/pipeline.xpack.elastic-agent.yml (100%) diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index 76fdfb3e6cdc..4607a0576d0b 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -141,8 +141,8 @@ "always_trigger_comment_regex": "^/test elastic-agent$", "skip_ci_labels": [ ], "skip_target_branches": [ ], - "skip_ci_on_only_changed": ["^xpack/elastic-agent/README.md", "^xpack/elastic-agent/docs/.*", "^xpack/elastic-agent/devtools/.*" ], - "always_require_ci_on_changed": ["^xpack/elastic-agent/.*", ".buildkite/xpack/elastic-agent/.*", "^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", "^testing/.*"] + "skip_ci_on_only_changed": ["^x-pack/elastic-agent/README.md", "^x-pack/elastic-agent/docs/.*", "^x-pack/elastic-agent/devtools/.*" ], + "always_require_ci_on_changed": ["^x-pack/elastic-agent/.*", ".buildkite/x-pack/elastic-agent/.*", "^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", "^testing/.*"] } ] } diff --git a/.buildkite/xpack/elastic-agent/pipeline.xpack.elastic-agent.yml b/.buildkite/x-pack/elastic-agent/pipeline.xpack.elastic-agent.yml similarity index 100% rename from .buildkite/xpack/elastic-agent/pipeline.xpack.elastic-agent.yml rename to .buildkite/x-pack/elastic-agent/pipeline.xpack.elastic-agent.yml diff --git a/catalog-info.yaml b/catalog-info.yaml index d145975ab7d3..f3dd3094788e 100644 --- a/catalog-info.yaml +++ 
b/catalog-info.yaml @@ -401,7 +401,7 @@ spec: description: "Beats xpack elastic agent pipeline" spec: branch_configuration: "7.17" - pipeline_file: ".buildkite/xpack/elastic-agent/pipeline.xpack.elastic-agent.yml" + pipeline_file: ".buildkite/x-pack/elastic-agent/pipeline.xpack.elastic-agent.yml" provider_settings: build_pull_request_forks: false build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot From a611494eb8edaae0b6de574925f54eebadf3d91f Mon Sep 17 00:00:00 2001 From: sharbuz <87968844+sharbuz@users.noreply.github.com> Date: Fri, 2 Feb 2024 17:41:29 +0200 Subject: [PATCH 107/129] migrate metricbeat pipeline to Buildkite (#37592) migrate the metricbeat pipeline to Buildkite --- .buildkite/hooks/post-checkout | 53 ++++ .buildkite/hooks/pre-command | 17 +- .buildkite/metricbeat/pipeline.yml | 45 +++- .buildkite/scripts/common.sh | 252 ++++++++++++++++++ .buildkite/scripts/crosscompile.sh | 8 + .../scripts/generate_metricbeat_pipeline.sh | 178 +++++++++++++ .buildkite/scripts/go_int_tests.sh | 12 + .buildkite/scripts/install_tools.sh | 48 ++++ .buildkite/scripts/packaging.sh | 12 + .buildkite/scripts/py_int_tests.sh | 12 + .buildkite/scripts/setenv.sh | 15 ++ .buildkite/scripts/unit_tests.sh | 12 + .buildkite/scripts/win_unit_tests.ps1 | 70 +++++ 13 files changed, 727 insertions(+), 7 deletions(-) create mode 100644 .buildkite/hooks/post-checkout create mode 100755 .buildkite/scripts/common.sh create mode 100755 .buildkite/scripts/crosscompile.sh create mode 100755 .buildkite/scripts/generate_metricbeat_pipeline.sh create mode 100755 .buildkite/scripts/go_int_tests.sh create mode 100644 .buildkite/scripts/install_tools.sh create mode 100755 .buildkite/scripts/packaging.sh create mode 100755 .buildkite/scripts/py_int_tests.sh create mode 100755 .buildkite/scripts/setenv.sh create mode 100755 .buildkite/scripts/unit_tests.sh create mode 100644 .buildkite/scripts/win_unit_tests.ps1 diff --git 
a/.buildkite/hooks/post-checkout b/.buildkite/hooks/post-checkout new file mode 100644 index 000000000000..e10f15de7b65 --- /dev/null +++ b/.buildkite/hooks/post-checkout @@ -0,0 +1,53 @@ +#!/bin/bash + +set -euo pipefail + +checkout_merge() { + local target_branch=$1 + local pr_commit=$2 + local merge_branch=$3 + + if [[ -z "${target_branch}" ]]; then + echo "No pull request target branch" + exit 1 + fi + + git fetch -v origin "${target_branch}" + git checkout FETCH_HEAD + echo "Current branch: $(git rev-parse --abbrev-ref HEAD)" + + # create temporal branch to merge the PR with the target branch + git checkout -b ${merge_branch} + echo "New branch created: $(git rev-parse --abbrev-ref HEAD)" + + # set author identity so it can be run git merge + git config user.name "github-merged-pr-post-checkout" + git config user.email "auto-merge@buildkite" + + git merge --no-edit "${BUILDKITE_COMMIT}" || { + local merge_result=$? + echo "Merge failed: ${merge_result}" + git merge --abort + exit ${merge_result} + } +} + +pull_request="${BUILDKITE_PULL_REQUEST:-false}" + +if [[ "${pull_request}" == "false" ]]; then + echo "Not a pull request, skipping" + exit 0 +fi + +TARGET_BRANCH="${BUILDKITE_PULL_REQUEST_BASE_BRANCH:-master}" +PR_COMMIT="${BUILDKITE_COMMIT}" +PR_ID=${BUILDKITE_PULL_REQUEST} +MERGE_BRANCH="pr_merge_${PR_ID}" + +checkout_merge "${TARGET_BRANCH}" "${PR_COMMIT}" "${MERGE_BRANCH}" + +echo "Commit information" +git --no-pager log --format=%B -n 1 + +# Ensure buildkite groups are rendered +echo "" diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index 0a1567e53cd5..ef38478a4327 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -2,12 +2,11 @@ set -euo pipefail -source .buildkite/env-scripts/env.sh -source .buildkite/env-scripts/util.sh -source .buildkite/env-scripts/win-env.sh - - if [[ "$BUILDKITE_PIPELINE_SLUG" == "filebeat" ]]; then + source .buildkite/env-scripts/env.sh + source 
.buildkite/env-scripts/util.sh + source .buildkite/env-scripts/win-env.sh + if [[ ${PLATFORM_TYPE} = MINGW* ]]; then install_python_win fi @@ -16,3 +15,11 @@ if [[ "$BUILDKITE_PIPELINE_SLUG" == "filebeat" ]]; then export GOLANG_VERSION=$(cat "${WORKSPACE}/.go-version") fi fi + +if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" ]]; then + source .buildkite/scripts/setenv.sh + if [[ "${BUILDKITE_COMMAND}" =~ ^buildkite-agent ]]; then + echo "Skipped pre-command when running the Upload pipeline" + exit 0 + fi +fi diff --git a/.buildkite/metricbeat/pipeline.yml b/.buildkite/metricbeat/pipeline.yml index 34321b61161b..0abc58a85ae5 100644 --- a/.buildkite/metricbeat/pipeline.yml +++ b/.buildkite/metricbeat/pipeline.yml @@ -1,5 +1,46 @@ # yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json +env: + IMAGE_UBUNTU_X86_64: "family/core-ubuntu-2204" + IMAGE_UBUNTU_ARM_64: "core-ubuntu-2004-aarch64" + IMAGE_WIN_10: "family/general-windows-10" + IMAGE_WIN_11: "family/general-windows-11" + IMAGE_WIN_2016: "family/core-windows-2016" + IMAGE_WIN_2019: "family/core-windows-2019" + IMAGE_WIN_2022: "family/core-windows-2022" + IMAGE_MACOS_X86_64: "generic-13-ventura-x64" + GO_AGENT_IMAGE: "golang:${GO_VERSION}" + BEATS_PROJECT_NAME: "metricbeat" steps: - - label: "Example test" - command: echo "Hello!" 
+ + - input: "Input Parameters" + key: "input-run-all-stages" + fields: + - select: "Metricbeat - runAllStages" + key: "runAllStages" + options: + - label: "True" + value: "true" + - label: "False" + value: "false" + default: "false" + - select: "Metricbeat - runMacOsTests" + key: "UI_MACOS_TESTS" + options: + - label: "True" + value: "true" + - label: "False" + value: "false" + default: "false" + if: "build.source == 'ui'" + + - wait: ~ + if: "build.source == 'ui'" + allow_dependency_failure: false + + - label: ":linux: Load dynamic metricbeat pipeline" + key: "metricbeat-pipeline" + command: ".buildkite/scripts/generate_metricbeat_pipeline.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" diff --git a/.buildkite/scripts/common.sh b/.buildkite/scripts/common.sh new file mode 100755 index 000000000000..a27fa820a7ab --- /dev/null +++ b/.buildkite/scripts/common.sh @@ -0,0 +1,252 @@ +#!/bin/bash +set -euo pipefail + +WORKSPACE=${WORKSPACE:-"$(pwd)"} +BIN="${WORKSPACE}/bin" +platform_type="$(uname)" +platform_type_lowercase=$(echo "$platform_type" | tr '[:upper:]' '[:lower:]') +arch_type="$(uname -m)" +GITHUB_PR_TRIGGER_COMMENT=${GITHUB_PR_TRIGGER_COMMENT:-""} +ONLY_DOCS=${ONLY_DOCS:-"true"} +UI_MACOS_TESTS="$(buildkite-agent meta-data get UI_MACOS_TESTS --default ${UI_MACOS_TESTS:-"false"})" +runAllStages="$(buildkite-agent meta-data get runAllStages --default ${runAllStages:-"false"})" +metricbeat_changeset=( + "^metricbeat/.*" + "^go.mod" + "^pytest.ini" + "^dev-tools/.*" + "^libbeat/.*" + "^testing/.*" + ) +oss_changeset=( + "^go.mod" + "^pytest.ini" + "^dev-tools/.*" + "^libbeat/.*" + "^testing/.*" +) +ci_changeset=( + "^.buildkite/.*" +) +go_mod_changeset=( + "^go.mod" + ) +docs_changeset=( + ".*\\.(asciidoc|md)" + "deploy/kubernetes/.*-kubernetes\\.yaml" + ) +packaging_changeset=( + "^dev-tools/packaging/.*" + ".go-version" + ) + +with_docker_compose() { + local version=$1 + echo "Setting up the Docker-compose environment..." 
+ create_workspace + retry 3 curl -sSL -o ${BIN}/docker-compose "https://github.com/docker/compose/releases/download/${version}/docker-compose-${platform_type_lowercase}-${arch_type}" + chmod +x ${BIN}/docker-compose + export PATH="${BIN}:${PATH}" + docker-compose version +} + +create_workspace() { + if [[ ! -d "${BIN}" ]]; then + mkdir -p "${BIN}" + fi +} + +add_bin_path() { + echo "Adding PATH to the environment variables..." + create_workspace + export PATH="${BIN}:${PATH}" +} + +check_platform_architeture() { + case "${arch_type}" in + "x86_64") + go_arch_type="amd64" + ;; + "aarch64") + go_arch_type="arm64" + ;; + "arm64") + go_arch_type="arm64" + ;; + *) + echo "The current platform/OS type is unsupported yet" + ;; + esac +} + +with_mage() { + local install_packages=( + "github.com/magefile/mage" + "github.com/elastic/go-licenser" + "golang.org/x/tools/cmd/goimports" + "github.com/jstemmer/go-junit-report" + "gotest.tools/gotestsum" + ) + create_workspace + for pkg in "${install_packages[@]}"; do + go install "${pkg}@latest" + done +} + +with_go() { + echo "Setting up the Go environment..." 
+ create_workspace + check_platform_architeture + retry 5 curl -sL -o "${BIN}/gvm" "https://github.com/andrewkroh/gvm/releases/download/${SETUP_GVM_VERSION}/gvm-${platform_type_lowercase}-${go_arch_type}" + chmod +x "${BIN}/gvm" + eval "$(gvm $GO_VERSION)" + go version + which go + local go_path="$(go env GOPATH):$(go env GOPATH)/bin" + export PATH="${go_path}:${PATH}" +} + +with_python() { + if [ "${platform_type}" == "Linux" ]; then + #sudo command doesn't work at the "pre-command" hook because of another user environment (root with strange permissions) + sudo apt-get update + sudo apt-get install -y python3-pip python3-venv + elif [ "${platform_type}" == "Darwin" ]; then + brew update + pip3 install virtualenv + ulimit -Sn 10000 + fi +} + +with_dependencies() { + if [ "${platform_type}" == "Linux" ]; then + #sudo command doesn't work at the "pre-command" hook because of another user environment (root with strange permissions) + sudo apt-get update + sudo apt-get install -y libsystemd-dev libpcap-dev + elif [ "${platform_type}" == "Darwin" ]; then + pip3 install libpcap + fi +} + +retry() { + local retries=$1 + shift + local count=0 + until "$@"; do + exit=$? + wait=$((2 ** count)) + count=$((count + 1)) + if [ $count -lt "$retries" ]; then + >&2 echo "Retry $count/$retries exited $exit, retrying in $wait seconds..." + sleep $wait + else + >&2 echo "Retry $count/$retries exited $exit, no more retries left." 
+ return $exit + fi + done + return 0 +} + +are_paths_changed() { + local patterns=("${@}") + local changelist=() + + for pattern in "${patterns[@]}"; do + changed_files=($(git diff --name-only HEAD@{1} HEAD | grep -E "$pattern")) + if [ "${#changed_files[@]}" -gt 0 ]; then + changelist+=("${changed_files[@]}") + fi + done + + if [ "${#changelist[@]}" -gt 0 ]; then + echo "Files changed:" + echo "${changelist[*]}" + return 0 + else + echo "No files changed within specified changeset:" + echo "${patterns[*]}" + return 1 + fi +} + +are_changed_only_paths() { + local patterns=("${@}") + local changelist=() + local changed_files=$(git diff --name-only HEAD@{1} HEAD) + if [ -z "$changed_files" ] || grep -qE "$(IFS=\|; echo "${patterns[*]}")" <<< "$changed_files"; then + return 0 + else + return 1 + fi +} + +are_conditions_met_mandatory_tests() { + if [[ "${BUILDKITE_PULL_REQUEST}" == "" ]] || [[ "${runAllStages}" == "true" ]] || [[ "${ONLY_DOCS}" == "false" && "${BUILDKITE_PULL_REQUEST}" != "" ]]; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/Jenkinsfile#L107-L137 + if are_paths_changed "${metricbeat_changeset[@]}" || are_paths_changed "${oss_changeset[@]}" || are_paths_changed "${ci_changeset[@]}" || [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test metricbeat" ]] || [[ "${GITHUB_PR_LABELS}" =~ Metricbeat ]]; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L3-L12 + return 0 + else + return 1 + fi + else + return 1 + fi +} + +are_conditions_met_extended_tests() { + if are_conditions_met_mandatory_tests; then #from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/Jenkinsfile#L145-L171 + return 0 + else + return 1 + fi +} + +are_conditions_met_macos_tests() { + if are_conditions_met_mandatory_tests; then #from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/Jenkinsfile#L145-L171 + if [[ 
"${UI_MACOS_TESTS}" == true ]] || [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test metricbeat for macos" ]] || [[ "${GITHUB_PR_LABELS}" =~ macOS ]]; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L3-L12 + return 0 + else + return 1 + fi + else + return 1 + fi +} + +are_conditions_met_extended_windows_tests() { + if [[ "${ONLY_DOCS}" == "false" && "${BUILDKITE_PULL_REQUEST}" != "" ]] || [[ "${runAllStages}" == "true" ]]; then #from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/Jenkinsfile#L145-L171 + if are_paths_changed "${metricbeat_changeset[@]}" || are_paths_changed "${oss_changeset[@]}" || are_paths_changed "${ci_changeset[@]}" || [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test metricbeat" ]] || [[ "${GITHUB_PR_LABELS}" =~ Metricbeat ]]; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L3-L12 + return 0 + else + return 1 + fi + else + return 1 + fi +} + +are_conditions_met_packaging() { + if are_conditions_met_extended_windows_tests; then #from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/Jenkinsfile#L145-L171 + if are_paths_changed "${metricbeat_changeset[@]}" || are_paths_changed "${oss_changeset[@]}" || [[ "${BUILDKITE_TAG}" == "" ]] || [[ "${BUILDKITE_PULL_REQUEST}" != "" ]]; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L101-L103 + return 0 + else + return 1 + fi + else + return 1 + fi +} + +if ! are_changed_only_paths "${docs_changeset[@]}" ; then + ONLY_DOCS="false" + echo "Changes include files outside the docs_changeset vairiabe. ONLY_DOCS=$ONLY_DOCS." +else + echo "All changes are related to DOCS. ONLY_DOCS=$ONLY_DOCS." 
+fi + +if are_paths_changed "${go_mod_changeset[@]}" ; then + GO_MOD_CHANGES="true" +fi + +if are_paths_changed "${packaging_changeset[@]}" ; then + PACKAGING_CHANGES="true" +fi diff --git a/.buildkite/scripts/crosscompile.sh b/.buildkite/scripts/crosscompile.sh new file mode 100755 index 000000000000..12f0f6574ca9 --- /dev/null +++ b/.buildkite/scripts/crosscompile.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/install_tools.sh + +set -euo pipefail + +echo "--- Run Crosscompile for $BEATS_PROJECT_NAME" +make -C "${BEATS_PROJECT_NAME}" crosscompile diff --git a/.buildkite/scripts/generate_metricbeat_pipeline.sh b/.buildkite/scripts/generate_metricbeat_pipeline.sh new file mode 100755 index 000000000000..a15447ba4bf6 --- /dev/null +++ b/.buildkite/scripts/generate_metricbeat_pipeline.sh @@ -0,0 +1,178 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/common.sh + +set -euo pipefail + +pipelineName="pipeline.metricbeat-dynamic.yml" + +cat > $pipelineName <<- YAML + +steps: + +YAML + +if are_conditions_met_mandatory_tests; then + cat >> $pipelineName <<- YAML + + - group: "Mandatory Tests" + key: "mandatory-tests" + steps: + - label: ":linux: Ubuntu Unit Tests" + key: "mandatory-linux-unit-test" + command: ".buildkite/scripts/unit_tests.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "c2-standard-16" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + + - label: ":go: Go Intergration Tests" + key: "mandatory-int-test" + command: ".buildkite/scripts/go_int_tests.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "c2-standard-16" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + + - label: ":python: Python Integration Tests" + key: "mandatory-python-int-test" + command: ".buildkite/scripts/py_int_tests.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "c2-standard-16" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + + - label: 
":negative_squared_cross_mark: Cross compile" + key: "mandatory-cross-compile" + command: ".buildkite/scripts/crosscompile.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "c2-standard-16" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + + - label: ":windows: Windows 2016/2022 Unit Tests - {{matrix.image}}" + command: ".buildkite/scripts/win_unit_tests.ps1" + key: "mandatory-win-unit-tests" + agents: + provider: "gcp" + image: "{{matrix.image}}" + machine_type: "n2-standard-8" + disk_size: 100 + disk_type: "pd-ssd" + matrix: + setup: + image: + - "${IMAGE_WIN_2016}" + - "${IMAGE_WIN_2022}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + +YAML +fi + +if are_conditions_met_extended_tests && are_conditions_met_macos_tests; then + cat >> $pipelineName <<- YAML + + - group: "Extended Tests" + key: "extended-tests" + steps: + - label: ":mac: MacOS Unit Tests" + key: "extended-macos-unit-tests" + command: ".buildkite/scripts/unit_tests.sh" + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + +YAML +fi + +if are_conditions_met_extended_windows_tests; then + cat >> $pipelineName <<- YAML + + - group: "Extended Windowds Tests" + key: "extended-win-tests" + steps: + - label: ":windows: Windows 2019 Unit Tests" + key: "extended-win-2019-unit-tests" + command: ".buildkite/scripts/win_unit_tests.ps1" + agents: + provider: "gcp" + image: "${IMAGE_WIN_2019}" + machine_type: "n2-standard-8" + disk_size: 100 + disk_type: "pd-ssd" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + + # Temporary disabled https://github.com/elastic/beats/issues/37841 + # - label: ":windows: Windows 10 Unit Tests" + # key: "extended-win-10-unit-tests" + # command: ".buildkite/scripts/win_unit_tests.ps1" + # agents: + # provider: "gcp" + # image: "${IMAGE_WIN_10}" + # machine_type: "n2-standard-8" + # disk_size: 100 + # disk_type: "pd-ssd" + # artifact_paths: 
"${BEATS_PROJECT_NAME}/build/*.*" + + - label: ":windows: Windows 11 Unit Tests" + key: "extended-win-11-unit-tests" + command: ".buildkite/scripts/win_unit_tests.ps1" + agents: + provider: "gcp" + image: "${IMAGE_WIN_11}" + machine_type: "n2-standard-8" + disk_size: 100 + disk_type: "pd-ssd" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + +YAML +fi + +if are_conditions_met_extended_windows_tests; then + cat >> $pipelineName <<- YAML + + - group: "Packaging" # TODO: check conditions for future the main pipeline migration: https://github.com/elastic/beats/pull/28589 + key: "packaging" + steps: + - label: ":linux: Packaging Linux" + key: "packaging-linux" + command: ".buildkite/scripts/packaging.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "c2-standard-16" + env: + PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" + + - label: ":linux: Packaging ARM" + key: "packaging-arm" + command: ".buildkite/scripts/packaging.sh" + agents: + provider: "aws" + imagePrefix: "${IMAGE_UBUNTU_ARM_64}" + instanceType: "t4g.xlarge" + env: + PLATFORMS: "linux/arm64" + PACKAGES: "docker" + + depends_on: + - step: "mandatory-tests" + allow_failure: false + - step: "extended-tests" + allow_failure: true + - step: "extended-win-tests" + allow_failure: true + +YAML +fi + +echo "--- Printing dynamic steps" #TODO: remove if the pipeline is public +cat $pipelineName + +echo "--- Loading dynamic steps" +buildkite-agent pipeline upload $pipelineName diff --git a/.buildkite/scripts/go_int_tests.sh b/.buildkite/scripts/go_int_tests.sh new file mode 100755 index 000000000000..b4c519f45126 --- /dev/null +++ b/.buildkite/scripts/go_int_tests.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/install_tools.sh + +set -euo pipefail + +echo "--- Run Go Intergration Tests for $BEATS_PROJECT_NAME" +pushd "${BEATS_PROJECT_NAME}" > /dev/null + +mage goIntegTest + +popd > /dev/null diff --git 
a/.buildkite/scripts/install_tools.sh b/.buildkite/scripts/install_tools.sh new file mode 100644 index 000000000000..796892341d30 --- /dev/null +++ b/.buildkite/scripts/install_tools.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/common.sh + +set -euo pipefail + +echo "--- Env preparation" + +# Temporary solution to fix the issues with "sudo apt get...." https://elastic.slack.com/archives/C0522G6FBNE/p1706003603442859?thread_ts=1706003209.424539&cid=C0522G6FBNE +# It could be removed when we use our own image for the BK agent. +if [ "${platform_type}" == "Linux" ]; then + DEBIAN_FRONTEND="noninteractive" + #sudo command doesn't work at the "pre-command" hook because of another user environment (root with strange permissions) + sudo mkdir -p /etc/needrestart + echo "\$nrconf{restart} = 'a';" | sudo tee -a /etc/needrestart/needrestart.conf > /dev/null +fi + +add_bin_path + +if command -v docker-compose &> /dev/null +then + echo "Found docker-compose. Checking version.." + FOUND_DOCKER_COMPOSE_VERSION=$(docker-compose --version | awk '{print $4}'|sed s/\,//) + if [ $FOUND_DOCKER_COMPOSE_VERSION == $DOCKER_COMPOSE_VERSION ]; then + echo "Versions match. No need to install docker-compose. Exiting." 
+ elif [[ "${platform_type}" == "Linux" && "${arch_type}" == "aarch64" ]]; then + with_docker_compose "${DOCKER_COMPOSE_VERSION_AARCH64}" + elif [[ "${platform_type}" == "Linux" && "${arch_type}" == "x86_64" ]]; then + with_docker_compose "${DOCKER_COMPOSE_VERSION}" + fi +else + with_docker_compose "${DOCKER_COMPOSE_VERSION}" +fi + +with_go "${GO_VERSION}" +with_mage +with_python +with_dependencies + +#sudo command doesn't work at the "pre-command" hook because of another user environment (root with strange permissions) +sudo chmod -R go-w "${BEATS_PROJECT_NAME}/" #TODO: Remove when the issue is solved https://github.com/elastic/beats/issues/37838 + +pushd "${BEATS_PROJECT_NAME}" > /dev/null + +#TODO "umask 0022" has to be removed after our own image is ready (it has to be moved to the image) +umask 0022 # fix the filesystem permissions issue like this: https://buildkite.com/elastic/beats-metricbeat/builds/1329#018d3179-25a9-475b-a2c8-64329dfe092b/320-1696 + +popd > /dev/null diff --git a/.buildkite/scripts/packaging.sh b/.buildkite/scripts/packaging.sh new file mode 100755 index 000000000000..1539d3ab430c --- /dev/null +++ b/.buildkite/scripts/packaging.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/install_tools.sh + +set -euo pipefail + +echo "--- Run Packaging for $BEATS_PROJECT_NAME" +pushd "${BEATS_PROJECT_NAME}" > /dev/null + +mage package + +popd > /dev/null diff --git a/.buildkite/scripts/py_int_tests.sh b/.buildkite/scripts/py_int_tests.sh new file mode 100755 index 000000000000..f43cc2021b5a --- /dev/null +++ b/.buildkite/scripts/py_int_tests.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/install_tools.sh + +set -euo pipefail + +echo "--- Run Python Intergration Tests for $BEATS_PROJECT_NAME" +pushd "${BEATS_PROJECT_NAME}" > /dev/null + +mage pythonIntegTest + +popd > /dev/null diff --git a/.buildkite/scripts/setenv.sh b/.buildkite/scripts/setenv.sh new file mode 100755 index 000000000000..901ba9891c20 --- 
/dev/null +++ b/.buildkite/scripts/setenv.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -euo pipefail + +SETUP_GVM_VERSION="v0.5.1" +DOCKER_COMPOSE_VERSION="1.21.0" +DOCKER_COMPOSE_VERSION_AARCH64="v2.21.0" +SETUP_WIN_PYTHON_VERSION="3.11.0" +GO_VERSION=$(cat .go-version) + +export SETUP_GVM_VERSION +export DOCKER_COMPOSE_VERSION +export DOCKER_COMPOSE_VERSION_AARCH64 +export SETUP_WIN_PYTHON_VERSION +export GO_VERSION diff --git a/.buildkite/scripts/unit_tests.sh b/.buildkite/scripts/unit_tests.sh new file mode 100755 index 000000000000..059b4166e296 --- /dev/null +++ b/.buildkite/scripts/unit_tests.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/install_tools.sh + +set -euo pipefail + +echo "--- Run Unit Tests" +pushd "${BEATS_PROJECT_NAME}" > /dev/null + +mage build unitTest + +popd > /dev/null diff --git a/.buildkite/scripts/win_unit_tests.ps1 b/.buildkite/scripts/win_unit_tests.ps1 new file mode 100644 index 000000000000..34833d183ffa --- /dev/null +++ b/.buildkite/scripts/win_unit_tests.ps1 @@ -0,0 +1,70 @@ +$ErrorActionPreference = "Stop" # set -e +$WorkFolder = "metricbeat" +# Forcing to checkout again all the files with a correct autocrlf. +# Doing this here because we cannot set git clone options before. +function fixCRLF { + Write-Host "-- Fixing CRLF in git checkout --" + git config core.autocrlf false + git rm --quiet --cached -r . + git reset --quiet --hard +} +function withChoco { + Write-Host "-- Configure Choco --" + $env:ChocolateyInstall = Convert-Path "$((Get-Command choco).Path)\..\.." 
+ Import-Module "$env:ChocolateyInstall\helpers\chocolateyProfile.psm1" +} +function withGolang($version) { + Write-Host "-- Install golang $version --" + choco install -y golang --version=$version + refreshenv + go version +} +function withPython($version) { + Write-Host "-- Install Python $version --" + choco install python --version=$version + refreshenv + python --version +} +function withMinGW { + Write-Host "-- Install MinGW --" + choco install mingw -y + refreshenv +} +function installGoDependencies { + $installPackages = @( + "github.com/magefile/mage" + "github.com/elastic/go-licenser" + "golang.org/x/tools/cmd/goimports" + "github.com/jstemmer/go-junit-report/v2" + "gotest.tools/gotestsum" + ) + foreach ($pkg in $installPackages) { + go install "$pkg@latest" + } +} + +fixCRLF + +withChoco + +withGolang $env:GO_VERSION + +installGoDependencies + +withPython $env:SETUP_WIN_PYTHON_VERSION + +withMinGW + +$ErrorActionPreference = "Continue" # set +e + +Push-Location $WorkFolder + +New-Item -ItemType Directory -Force -Path "build" +mage build unitTest + +Pop-Location + +$EXITCODE=$LASTEXITCODE +$ErrorActionPreference = "Stop" + +Exit $EXITCODE From cc6e88110c8470a17fa9ab7673fd041df76873e6 Mon Sep 17 00:00:00 2001 From: Mattia Meleleo Date: Fri, 2 Feb 2024 19:12:33 +0100 Subject: [PATCH 108/129] Add auditbeat integTest manually triggered workflows (#37824) * Add auditbeat integTest manually triggered workflows * ci: add backlog_wait_time_actual in expected fields of test_show_auditd_status --------- Co-authored-by: Panos Koutsovasilis --- auditbeat/Jenkinsfile.yml | 18 ++++++++++++++++++ auditbeat/tests/system/test_show_command.py | 1 + 2 files changed, 19 insertions(+) diff --git a/auditbeat/Jenkinsfile.yml b/auditbeat/Jenkinsfile.yml index 4ea656f174ea..a68f7e1094a4 100644 --- a/auditbeat/Jenkinsfile.yml +++ b/auditbeat/Jenkinsfile.yml @@ -30,6 +30,24 @@ stages: unitTest: mage: "mage build unitTest" stage: mandatory + integTest: + mage: "mage build integTest" 
+ when: + comments: + - "/test auditbeat integTest" + branches: false + tags: false + stage: extended + integTest-arm: + mage: "mage build integTest" + platforms: + - "ubuntu-2204-aarch64" + when: + comments: + - "/test auditbeat integTest arm" + branches: false + tags: false + stage: extended crosscompile: make: "make -C auditbeat crosscompile" stage: mandatory diff --git a/auditbeat/tests/system/test_show_command.py b/auditbeat/tests/system/test_show_command.py index 3aa15c0aec24..843ab7e829dc 100644 --- a/auditbeat/tests/system/test_show_command.py +++ b/auditbeat/tests/system/test_show_command.py @@ -98,6 +98,7 @@ def test_show_auditd_status(self): 'lost', 'backlog', 'backlog_wait_time', + 'backlog_wait_time_actual', 'features', ] From 456780387af4c68858d94abbf757d06693b28902 Mon Sep 17 00:00:00 2001 From: Michael Wolf Date: Fri, 2 Feb 2024 11:34:32 -0800 Subject: [PATCH 109/129] Add watcher for Kernel Events via ebpf (#37833) This adds a watcher that will watch for Linux kernel events, using ebpf via the ebpfevents library, and send the events to subscribed clients. By using a single global watcher, multiple clients can subscribe and receive kernel events, while avoiding increasing the amount of kernel resources used (e.g. avoiding having multiple ebpf probes/maps). 
--- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 98 ++++++++++++++++- dev-tools/notice/overrides.json | 1 + go.mod | 2 + go.sum | 13 ++- libbeat/ebpf/seccomp_linux.go | 40 +++++++ libbeat/ebpf/watcher_linux.go | 183 ++++++++++++++++++++++++++++++++ libbeat/ebpf/watcher_test.go | 61 +++++++++++ 8 files changed, 392 insertions(+), 7 deletions(-) create mode 100644 libbeat/ebpf/seccomp_linux.go create mode 100644 libbeat/ebpf/watcher_linux.go create mode 100644 libbeat/ebpf/watcher_test.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 8d09997f774d..41b467a0c363 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -188,6 +188,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d *Libbeat* +- Add watcher that can be used to monitor Linux kernel events. {pull}37833[37833] *Heartbeat* - Added status to monitor run log report. diff --git a/NOTICE.txt b/NOTICE.txt index 7e0e27d091c7..22de5a2356bb 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -12255,6 +12255,32 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/ebpfevents +Version: v0.3.2 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/ebpfevents@v0.3.2/LICENSE.txt: + +The https://github.com/elastic/ebpfevents repository contains source code under +various licenses: + +- Source code in the 'headers/bpf' directory, is dual-licensed under the GNU Lesser General + Public License version 2.1 (LICENSES/LGPL-2.1-only.txt) OR BSD-2-Clause license + (LICENSES/BSD-2-Clause.txt) + +- Source code in the 'ebpf' submodule is licensed with multiple licenses. Read more at + https://github.com/elastic/ebpf/blob/main/LICENSE.txt. 
+ +- The binary files 'bpf_bpfel_x86.o' and 'bpf_bpfel_amd64.o' are compiled + from dual-licensed GPL-2.0-only OR BSD-2-Clause licensed code, and are distributed with + the GPL-2.0-only License (LICENSES/GPL-2.0-only.txt). + +- Source code not listed in the previous points is licensed under the Apache License, + version 2 (LICENSES/Apache-2.0.txt). + + -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-autodiscover Version: v0.6.7 @@ -36167,6 +36193,39 @@ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/cilium/ebpf +Version: v0.12.3 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/cilium/ebpf@v0.12.3/LICENSE: + +MIT License + +Copyright (c) 2017 Nathan Sweet +Copyright (c) 2018, 2019 Cloudflare +Copyright (c) 2019 Authors of Cilium + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/codegangsta/inject Version: v0.0.0-20150114235600-33e0aa1cb7c0 @@ -38172,11 +38231,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/frankban/quicktest -Version: v1.14.3 +Version: v1.14.5 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/frankban/quicktest@v1.14.3/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/frankban/quicktest@v1.14.5/LICENSE: MIT License @@ -38201,6 +38260,37 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+-------------------------------------------------------------------------------- +Dependency : github.com/go-faker/faker/v4 +Version: v4.2.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/go-faker/faker/v4@v4.2.0/LICENSE: + +MIT License + +Copyright (c) 2017 Iman Tumorang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/go-logfmt/logfmt Version: v0.5.1 @@ -45643,11 +45733,11 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : github.com/kr/pretty -Version: v0.3.0 +Version: v0.3.1 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/kr/pretty@v0.3.0/License: +Contents of probable licence file $GOMODCACHE/github.com/kr/pretty@v0.3.1/License: Copyright 2012 Keith Rarick diff --git a/dev-tools/notice/overrides.json b/dev-tools/notice/overrides.json index 1484fcde52a0..eee18acc0de5 100644 --- a/dev-tools/notice/overrides.json +++ b/dev-tools/notice/overrides.json @@ -17,3 +17,4 @@ {"name": "github.com/awslabs/kinesis-aggregation/go/v2", "licenceType": "Apache-2.0", "url": "https://github.com/awslabs/kinesis-aggregation/blob/master/LICENSE.txt"} {"name": "github.com/dnaeon/go-vcr", "licenceType": "BSD-2-Clause"} {"name": "github.com/JohnCGriffin/overflow", "licenceType": "MIT"} +{"name": "github.com/elastic/ebpfevents", "licenceType": "Apache-2.0"} diff --git a/go.mod b/go.mod index e39b37f445eb..0556e2b432e6 100644 --- a/go.mod +++ b/go.mod @@ -200,6 +200,7 @@ require ( github.com/aws/smithy-go v1.13.5 github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5 github.com/elastic/bayeux v1.0.5 + github.com/elastic/ebpfevents v0.3.2 github.com/elastic/elastic-agent-autodiscover v0.6.7 github.com/elastic/elastic-agent-libs v0.7.5 github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 @@ -265,6 +266,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect + github.com/cilium/ebpf v0.12.3 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect diff --git a/go.sum b/go.sum index e11edad3d976..0f1a1756d752 100644 
--- a/go.sum +++ b/go.sum @@ -430,6 +430,8 @@ github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLI github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4= +github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= @@ -657,6 +659,8 @@ github.com/elastic/bayeux v1.0.5 h1:UceFq01ipmT3S8DzFK+uVAkbCdiPR0Bqei8qIGmUeY0= github.com/elastic/bayeux v1.0.5/go.mod h1:CSI4iP7qeo5MMlkznGvYKftp8M7qqP/3nzmVZoXHY68= github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3 h1:lnDkqiRFKm0rxdljqrj3lotWinO9+jFmeDXIC4gvIQs= github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3/go.mod h1:aPqzac6AYkipvp4hufTyMj5PDIphF3+At8zr7r51xjY= +github.com/elastic/ebpfevents v0.3.2 h1:UJ8kW5jw2TpUR5MEMaZ1O62sK9JQ+5xTlj+YpQC6BXc= +github.com/elastic/ebpfevents v0.3.2/go.mod h1:o21z5xup/9dK8u0Hg9bZRflSqqj1Zu5h2dg2hSTcUPQ= github.com/elastic/elastic-agent-autodiscover v0.6.7 h1:+KVjltN0rPsBrU8b156gV4lOTBgG/vt0efFCFARrf3g= github.com/elastic/elastic-agent-autodiscover v0.6.7/go.mod h1:hFeFqneS2r4jD0/QzGkrNk0YVdN0JGh7lCWdsH7zcI4= github.com/elastic/elastic-agent-client/v7 v7.6.0 h1:FEn6FjzynW4TIQo5G096Tr7xYK/P5LY9cSS6wRbXZTc= @@ -754,8 +758,8 @@ github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= 
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= +github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/gabriel-vasile/mimetype v1.4.1/go.mod h1:05Vi0w3Y9c/lNvJOdmIwvrrAhX3rYhfQQCaf9VJcv7M= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= @@ -767,6 +771,8 @@ github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0 github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-faker/faker/v4 v4.2.0 h1:dGebOupKwssrODV51E0zbMrv5e2gO9VWSLNC1WDCpWg= +github.com/go-faker/faker/v4 v4.2.0/go.mod h1:F/bBy8GH9NxOxMInug5Gx4WYeG6fHJZ8Ol/dhcpRub4= github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= @@ -1340,8 +1346,9 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= diff --git a/libbeat/ebpf/seccomp_linux.go b/libbeat/ebpf/seccomp_linux.go new file mode 100644 index 000000000000..9059eb0f6433 --- /dev/null +++ b/libbeat/ebpf/seccomp_linux.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build linux + +package ebpf + +import ( + "runtime" + + "github.com/elastic/beats/v7/libbeat/common/seccomp" +) + +func init() { + switch runtime.GOARCH { + case "amd64": + syscalls := []string{ + "bpf", + "eventfd2", // needed by ringbuf + "perf_event_open", // needed by tracepoints + } + if err := seccomp.ModifyDefaultPolicy(seccomp.AddSyscall, syscalls...); err != nil { + panic(err) + } + } +} diff --git a/libbeat/ebpf/watcher_linux.go b/libbeat/ebpf/watcher_linux.go new file mode 100644 index 000000000000..e0da448d87a6 --- /dev/null +++ b/libbeat/ebpf/watcher_linux.go @@ -0,0 +1,183 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux + +package ebpf + +import ( + "context" + "fmt" + "sync" + + "github.com/elastic/ebpfevents" +) + +var ( + gWatcherOnce sync.Once + gWatcher Watcher +) + +type client struct { + name string + mask EventMask + records chan ebpfevents.Record +} + +// EventMask is a mask of ebpfevents.EventType which is used to control which event types clients will receive. +type EventMask uint64 + +// Watcher observes kernel events, using ebpf probes from the ebpfevents library, and sends the +// events to subscribing clients. 
+// +// A single global watcher can exist, and can deliver events to multiple clients. Clients subscribe +// to the watcher, and all ebpf events that match their mask will be sent to their channel. +type Watcher struct { + sync.Mutex + cancel context.CancelFunc + loader *ebpfevents.Loader + clients map[string]client + status status + err error +} + +type status int + +const ( + stopped status = iota + started +) + +// GetWatcher creates the watcher, if required, and returns a reference to the global Watcher. +func GetWatcher() (*Watcher, error) { + gWatcher.Lock() + defer gWatcher.Unlock() + + // Try to load the probe once on startup so consumers can error out. + gWatcherOnce.Do(func() { + if gWatcher.status == stopped { + l, err := ebpfevents.NewLoader() + if err != nil { + gWatcher.err = fmt.Errorf("init ebpf loader: %w", err) + return + } + _ = l.Close() + } + }) + + return &gWatcher, gWatcher.err +} + +// Subscribe to receive events from the watcher. +func (w *Watcher) Subscribe(clientName string, events EventMask) <-chan ebpfevents.Record { + w.Lock() + defer w.Unlock() + + if w.status == stopped { + w.startLocked() + } + + w.clients[clientName] = client{ + name: clientName, + mask: events, + records: make(chan ebpfevents.Record, w.loader.BufferLen()), + } + + return w.clients[clientName].records +} + +// Unsubscribe the client with the given name. 
+func (w *Watcher) Unsubscribe(clientName string) { + w.Lock() + defer w.Unlock() + + delete(w.clients, clientName) + + if w.nclients() == 0 { + w.stopLocked() + } +} + +func (w *Watcher) startLocked() { + if w.status == started { + return + } + + loader, err := ebpfevents.NewLoader() + if err != nil { + w.err = fmt.Errorf("start ebpf loader: %w", err) + return + } + + w.loader = loader + w.clients = make(map[string]client) + + records := make(chan ebpfevents.Record, loader.BufferLen()) + var ctx context.Context + ctx, w.cancel = context.WithCancel(context.Background()) + + go w.loader.EventLoop(ctx, records) + go func(ctx context.Context) { + for { + select { + case record := <-records: + if record.Error != nil { + for _, client := range w.clients { + client.records <- record + } + continue + } + for _, client := range w.clients { + if client.mask&EventMask(record.Event.Type) != 0 { + client.records <- record + } + } + continue + case <-ctx.Done(): + return + } + } + }(ctx) + + w.status = started +} + +func (w *Watcher) stopLocked() { + if w.status == stopped { + return + } + w.close() + w.status = stopped +} + +func (w *Watcher) nclients() int { + return len(w.clients) +} + +func (w *Watcher) close() { + if w.cancel != nil { + w.cancel() + } + + if w.loader != nil { + _ = w.loader.Close() + } + + for _, cl := range w.clients { + close(cl.records) + } +} diff --git a/libbeat/ebpf/watcher_test.go b/libbeat/ebpf/watcher_test.go new file mode 100644 index 000000000000..13d27ffd52c0 --- /dev/null +++ b/libbeat/ebpf/watcher_test.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux + +package ebpf + +import ( + "math" + "testing" + + "github.com/stretchr/testify/assert" +) + +const allEvents = EventMask(math.MaxUint64) + +func TestWatcherStartStop(t *testing.T) { + w, err := GetWatcher() + if err != nil { + t.Skipf("skipping ebpf watcher test: %v", err) + } + assert.Equal(t, gWatcher.status, stopped) + assert.Equal(t, 0, gWatcher.nclients()) + + _ = w.Subscribe("test-1", allEvents) + assert.Equal(t, gWatcher.status, started) + assert.Equal(t, 1, gWatcher.nclients()) + + _ = w.Subscribe("test-2", allEvents) + assert.Equal(t, 2, gWatcher.nclients()) + + w.Unsubscribe("test-2") + assert.Equal(t, 1, gWatcher.nclients()) + + w.Unsubscribe("dummy") + assert.Equal(t, 1, gWatcher.nclients()) + + assert.Equal(t, gWatcher.status, started) + w.Unsubscribe("test-1") + assert.Equal(t, 0, gWatcher.nclients()) + assert.Equal(t, gWatcher.status, stopped) + + _ = w.Subscribe("new", allEvents) + assert.Equal(t, 1, gWatcher.nclients()) + assert.Equal(t, gWatcher.status, started) + w.Unsubscribe("new") +} From e4b448f0d135cda0b2f37dc4e81847777a328a71 Mon Sep 17 00:00:00 2001 From: Shaunak Kashyap Date: Fri, 2 Feb 2024 13:04:38 -0800 Subject: [PATCH 110/129] Pass context with timeout to FQDN lookup (#37756) * Pass context with timeout to FQDN lookup * Add temporary gomod replace * Add CHANGELOG entry * Update dependency * Update method calls * Add nolint for rand deprecation warning * Make linter happy --- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- libbeat/cmd/instance/beat.go | 7 
+++++-- .../processors/add_host_metadata/add_host_metadata.go | 10 +++++++--- 6 files changed, 18 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 41b467a0c363..6f66f76ef8f3 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -62,6 +62,7 @@ you can achieve this by overwriting the value using an `add_fields` processor. { - Upgrade elastic-agent-libs to v0.7.5. Removes obsolete "Treating the CommonName field on X.509 certificates as a host name..." deprecation warning for 8.0. {pull}37755[37755] - aws: Add credential caching for `AssumeRole` session tokens. {issue}37787[37787] - Lower logging level to debug when attempting to configure beats with unknown fields from autodiscovered events/environments {pull}[37816][37816] +- Set timeout of 1 minute for FQDN requests {pull}37756[37756] *Auditbeat* diff --git a/NOTICE.txt b/NOTICE.txt index 22de5a2356bb..eea974cedd13 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -14981,11 +14981,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-structform@v -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-sysinfo -Version: v1.11.2 +Version: v1.12.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-sysinfo@v1.11.2/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-sysinfo@v1.12.0/LICENSE.txt: Apache License diff --git a/go.mod b/go.mod index 0556e2b432e6..1c29491fbba4 100644 --- a/go.mod +++ b/go.mod @@ -78,7 +78,7 @@ require ( github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595 github.com/elastic/go-seccomp-bpf v1.4.0 github.com/elastic/go-structform v0.0.10 - github.com/elastic/go-sysinfo v1.11.2 + github.com/elastic/go-sysinfo v1.12.0 github.com/elastic/go-ucfg v0.8.6 github.com/elastic/gosigar v0.14.2 
github.com/fatih/color v1.15.0 diff --git a/go.sum b/go.sum index 0f1a1756d752..039364a70a4d 100644 --- a/go.sum +++ b/go.sum @@ -696,8 +696,8 @@ github.com/elastic/go-seccomp-bpf v1.4.0 h1:6y3lYrEHrLH9QzUgOiK8WDqmPaMnnB785Wxi github.com/elastic/go-seccomp-bpf v1.4.0/go.mod h1:wIMxjTbKpWGQk4CV9WltlG6haB4brjSH/dvAohBPM1I= github.com/elastic/go-structform v0.0.10 h1:oy08o/Ih2hHTkNcRY/1HhaYvIp5z6t8si8gnCJPDo1w= github.com/elastic/go-structform v0.0.10/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= -github.com/elastic/go-sysinfo v1.11.2 h1:mcm4OSYVMyws6+n2HIVMGkln5HOpo5Ie1ZmbbNn0jg4= -github.com/elastic/go-sysinfo v1.11.2/go.mod h1:GKqR8bbMK/1ITnez9NIsIfXQr25aLhRJa7AfT8HpBFQ= +github.com/elastic/go-sysinfo v1.12.0 h1:ZKyB4N5XLnGFysNGNnJl8xvd+GBGCe2MemBykR+3yQI= +github.com/elastic/go-sysinfo v1.12.0/go.mod h1:GKqR8bbMK/1ITnez9NIsIfXQr25aLhRJa7AfT8HpBFQ= github.com/elastic/go-ucfg v0.8.6 h1:stUeyh2goTgGX+/wb9gzKvTv0YB0231LTpKUgCKj4U0= github.com/elastic/go-ucfg v0.8.6/go.mod h1:4E8mPOLSUV9hQ7sgLEJ4bvt0KhMuDJa8joDT2QGAEKA= github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index efe8bd48f79a..7a70b1a55baa 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -197,7 +197,7 @@ func initRand() { } else { seed = n.Int64() } - rand.Seed(seed) + rand.Seed(seed) //nolint:staticcheck // need seed from cryptographically strong PRNG. } // Run initializes and runs a Beater implementation. name is the name of the @@ -824,7 +824,10 @@ func (b *Beat) configure(settings Settings) error { return fmt.Errorf("failed to get host information: %w", err) } - fqdn, err := h.FQDN() + fqdnLookupCtx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + fqdn, err := h.FQDNWithContext(fqdnLookupCtx) if err != nil { // FQDN lookup is "best effort". We log the error, fallback to // the OS-reported hostname, and move on. 
diff --git a/libbeat/processors/add_host_metadata/add_host_metadata.go b/libbeat/processors/add_host_metadata/add_host_metadata.go index db3cbbc5ee30..5fe28194b555 100644 --- a/libbeat/processors/add_host_metadata/add_host_metadata.go +++ b/libbeat/processors/add_host_metadata/add_host_metadata.go @@ -18,6 +18,7 @@ package add_host_metadata import ( + "context" "fmt" "sync" "time" @@ -25,6 +26,7 @@ import ( "github.com/gofrs/uuid" "github.com/elastic/elastic-agent-libs/monitoring" + "github.com/elastic/go-sysinfo" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/features" @@ -35,7 +37,6 @@ import ( "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/elastic-agent-system-metrics/metric/system/host" - "github.com/elastic/go-sysinfo" ) const processorName = "add_host_metadata" @@ -96,7 +97,7 @@ func New(cfg *config.C) (beat.Processor, error) { } // create a unique ID for this instance of the processor - cbIDStr := "" + var cbIDStr string cbID, err := uuid.NewV4() // if we fail, fall back to the processor name, hope for the best. if err != nil { @@ -178,7 +179,10 @@ func (p *addHostMetadata) loadData(checkCache bool, useFQDN bool) error { hostname := h.Info().Hostname if useFQDN { - fqdn, err := h.FQDN() + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + fqdn, err := h.FQDNWithContext(ctx) if err != nil { // FQDN lookup is "best effort". If it fails, we monitor the failure, fallback to // the OS-reported hostname, and move on. 
From 9e4329635319a7f8d7cbcb76efb56c9bf987890e Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 2 Feb 2024 17:03:12 -0500 Subject: [PATCH 111/129] chore: Update snapshot.yml (#37843) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 977cfdd021fb..ee1b8a9c56c1 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-por0bbe1-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-c876301f-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-por0bbe1-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-c876301f-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-por0bbe1-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-c876301f-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 6d4f1e6e2e428add752511113dd023c9d7ec13c5 Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Sat, 3 Feb 2024 11:47:13 +1030 Subject: [PATCH 112/129] x-pack/filebeat/input/{cel,httpjson,http_endpoint}: prevent complete loss of long request trace data (#37836) The lumberjack logger drops lines that are longer than 
the max size, so truncate bodies that are near the limit to ensure that at least some logging data is retained. Also truncate requests that are too long, including in http_endpoint. --- CHANGELOG.next.asciidoc | 1 + x-pack/filebeat/input/cel/input.go | 4 +++- .../filebeat/input/http_endpoint/handler.go | 4 +++- x-pack/filebeat/input/httpjson/input.go | 7 ++++++- .../input/internal/httplog/roundtripper.go | 21 ++++++++++++------- 5 files changed, 27 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 6f66f76ef8f3..40d189797b32 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -184,6 +184,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - Add request trace logging for chained API requests. {issue}37551[36551] {pull}37682[37682] - Relax TCP/UDP metric polling expectations to improve metric collection. {pull}37714[37714] - Add support for PEM-based Okta auth in HTTPJSON. {pull}37772[37772] +- Prevent complete loss of long request trace data. {issue}37826[37826] {pull}37836[37836] *Auditbeat* diff --git a/x-pack/filebeat/input/cel/input.go b/x-pack/filebeat/input/cel/input.go index 420c61a1e645..12dd4c4dcecf 100644 --- a/x-pack/filebeat/input/cel/input.go +++ b/x-pack/filebeat/input/cel/input.go @@ -723,7 +723,9 @@ func newClient(ctx context.Context, cfg config, log *logp.Logger) (*http.Client, ) traceLogger := zap.New(core) - trace = httplog.NewLoggingRoundTripper(c.Transport, traceLogger) + const margin = 1e3 // 1kB ought to be enough room for all the remainder of the trace details. 
+ maxSize := cfg.Resource.Tracer.MaxSize * 1e6 + trace = httplog.NewLoggingRoundTripper(c.Transport, traceLogger, max(0, maxSize-margin)) c.Transport = trace } diff --git a/x-pack/filebeat/input/http_endpoint/handler.go b/x-pack/filebeat/input/http_endpoint/handler.go index 75e34c0928e1..0e2620b5b658 100644 --- a/x-pack/filebeat/input/http_endpoint/handler.go +++ b/x-pack/filebeat/input/http_endpoint/handler.go @@ -177,7 +177,9 @@ func (h *handler) logRequest(r *http.Request, status int, respBody []byte) { zap.ByteString("http.response.body.content", respBody), ) } - httplog.LogRequest(h.reqLogger, r, extra...) + // Limit request logging body size to 10kiB. + const maxBodyLen = 10 * (1 << 10) + httplog.LogRequest(h.reqLogger, r, maxBodyLen, extra...) if scheme != "" { r.URL.Scheme = scheme } diff --git a/x-pack/filebeat/input/httpjson/input.go b/x-pack/filebeat/input/httpjson/input.go index 17877b607013..50a4f7f20a61 100644 --- a/x-pack/filebeat/input/httpjson/input.go +++ b/x-pack/filebeat/input/httpjson/input.go @@ -253,7 +253,12 @@ func newNetHTTPClient(ctx context.Context, cfg *requestConfig, log *logp.Logger, ) traceLogger := zap.New(core) - netHTTPClient.Transport = httplog.NewLoggingRoundTripper(netHTTPClient.Transport, traceLogger) + const margin = 1e3 // 1kB ought to be enough room for all the remainder of the trace details. 
+ maxSize := cfg.Tracer.MaxSize*1e6 - margin + if maxSize < 0 { + maxSize = 0 + } + netHTTPClient.Transport = httplog.NewLoggingRoundTripper(netHTTPClient.Transport, traceLogger, maxSize) } if reg != nil { diff --git a/x-pack/filebeat/input/internal/httplog/roundtripper.go b/x-pack/filebeat/input/internal/httplog/roundtripper.go index 4f0eb9eb670a..eac54d7378f5 100644 --- a/x-pack/filebeat/input/internal/httplog/roundtripper.go +++ b/x-pack/filebeat/input/internal/httplog/roundtripper.go @@ -32,9 +32,10 @@ type contextKey string // NewLoggingRoundTripper returns a LoggingRoundTripper that logs requests and // responses to the provided logger. -func NewLoggingRoundTripper(next http.RoundTripper, logger *zap.Logger) *LoggingRoundTripper { +func NewLoggingRoundTripper(next http.RoundTripper, logger *zap.Logger, maxBodyLen int) *LoggingRoundTripper { return &LoggingRoundTripper{ transport: next, + maxBodyLen: maxBodyLen, logger: logger, txBaseID: newID(), txIDCounter: atomic.NewUint64(0), @@ -44,6 +45,7 @@ func NewLoggingRoundTripper(next http.RoundTripper, logger *zap.Logger) *Logging // LoggingRoundTripper is an http.RoundTripper that logs requests and responses. type LoggingRoundTripper struct { transport http.RoundTripper + maxBodyLen int // The maximum length of a body. Longer bodies will be truncated. logger *zap.Logger // Destination logger. txBaseID string // Random value to make transaction IDs unique. txIDCounter *atomic.Uint64 // Transaction ID counter that is incremented for each request. 
@@ -63,6 +65,7 @@ type LoggingRoundTripper struct { // http.request // user_agent.original // http.request.body.content +// http.request.body.truncated // http.request.body.bytes // http.request.mime_type // event.original (the request without body from httputil.DumpRequestOut) @@ -71,6 +74,7 @@ type LoggingRoundTripper struct { // // http.response.status_code // http.response.body.content +// http.response.body.truncated // http.response.body.bytes // http.response.mime_type // event.original (the response without body from httputil.DumpResponse) @@ -86,7 +90,7 @@ func (rt *LoggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, err } } - req, respParts, errorsMessages := logRequest(log, req) + req, respParts, errorsMessages := logRequest(log, req, rt.maxBodyLen) resp, err := rt.transport.RoundTrip(req) if err != nil { @@ -107,7 +111,8 @@ func (rt *LoggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, err errorsMessages = append(errorsMessages, fmt.Sprintf("failed to read response body: %s", err)) } else { respParts = append(respParts, - zap.ByteString("http.response.body.content", body), + zap.ByteString("http.response.body.content", body[:min(len(body), rt.maxBodyLen)]), + zap.Bool("http.response.body.truncated", rt.maxBodyLen < len(body)), zap.Int("http.response.body.bytes", len(body)), zap.String("http.response.mime_type", resp.Header.Get("Content-Type")), ) @@ -143,17 +148,18 @@ func (rt *LoggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, err // http.request // user_agent.original // http.request.body.content +// http.request.body.truncated // http.request.body.bytes // http.request.mime_type // event.original (the request without body from httputil.DumpRequestOut) // // Additional fields in extra will also be logged. -func LogRequest(log *zap.Logger, req *http.Request, extra ...zapcore.Field) *http.Request { - req, _, _ = logRequest(log, req, extra...) 
+func LogRequest(log *zap.Logger, req *http.Request, maxBodyLen int, extra ...zapcore.Field) *http.Request { + req, _, _ = logRequest(log, req, maxBodyLen, extra...) return req } -func logRequest(log *zap.Logger, req *http.Request, extra ...zapcore.Field) (_ *http.Request, parts []zapcore.Field, errorsMessages []string) { +func logRequest(log *zap.Logger, req *http.Request, maxBodyLen int, extra ...zapcore.Field) (_ *http.Request, parts []zapcore.Field, errorsMessages []string) { reqParts := append([]zapcore.Field{ zap.String("url.original", req.URL.String()), zap.String("url.scheme", req.URL.Scheme), @@ -174,7 +180,8 @@ func logRequest(log *zap.Logger, req *http.Request, extra ...zapcore.Field) (_ * errorsMessages = append(errorsMessages, fmt.Sprintf("failed to read request body: %s", err)) } else { reqParts = append(reqParts, - zap.ByteString("http.request.body.content", body), + zap.ByteString("http.request.body.content", body[:min(len(body), maxBodyLen)]), + zap.Bool("http.request.body.truncated", maxBodyLen < len(body)), zap.Int("http.request.body.bytes", len(body)), zap.String("http.request.mime_type", req.Header.Get("Content-Type")), ) From 84502d287440057585f07f8ebde2609aa6d99351 Mon Sep 17 00:00:00 2001 From: Anderson Queiroz Date: Mon, 5 Feb 2024 14:56:28 +0100 Subject: [PATCH 113/129] Receive and use package.version from Elastic Agent (#37553) In managed mode (running under Agent), Beats now receive agent information alongside connection details. This includes the agent's package version, which Beats will report instead of their own. That means the version added to event will be the agent's package version. The beats test framework (libbeat/tests/integration/framework.go) now utilizes exec.Cmd instead of os.StartProcess to initiate the test beat. Furthermore, the StdinPipe is now exposed instead of binding os.Stdin to the process stdin. 
A new utility, `testing/certutil/certutil`, has been created to provide root CA and child certificates for use in tests. --- CHANGELOG.next.asciidoc | 3 + NOTICE.txt | 8 +- docs/devguide/testing.asciidoc | 10 +- go.mod | 4 +- go.sum | 8 +- libbeat/cmd/instance/beat.go | 21 +- libbeat/management/management.go | 6 +- .../tests/integration/cmd_keystore_test.go | 28 +- libbeat/tests/integration/framework.go | 30 +- libbeat/tests/integration/mockserver.go | 8 +- libbeat/version/helper.go | 38 ++- testing/certutil/certutil.go | 186 ++++++++++++ .../tests/integration/managerV2_test.go | 281 ++++++++++++++++++ x-pack/libbeat/management/managerV2.go | 39 +-- x-pack/libbeat/management/managerV2_test.go | 3 +- .../libbeat/management/tests/mock_server.go | 7 +- 16 files changed, 613 insertions(+), 67 deletions(-) create mode 100644 testing/certutil/certutil.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 40d189797b32..5389ca6551b4 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -15,6 +15,9 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] platform, and when viewed from a metadata API standpoint, it is impossible to differentiate it from OpenStack. If you know that your deployments run on Huawei Cloud exclusively, and you wish to have `cloud.provider` value as `huawei`, you can achieve this by overwriting the value using an `add_fields` processor. {pull}35184[35184] + - In managed mode, Beats running under Elastic Agent will report the package +version of Elastic Agent as their own version. This includes all additional +fields added to events containing the Beats version. 
{pull}37553[37553] *Auditbeat* diff --git a/NOTICE.txt b/NOTICE.txt index eea974cedd13..573e544bb2e8 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -12494,11 +12494,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-a -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-client/v7 -Version: v7.6.0 +Version: v7.8.0 Licence type (autodetected): Elastic -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.6.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.8.0/LICENSE.txt: ELASTIC LICENSE AGREEMENT @@ -25546,11 +25546,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.58.3/LIC -------------------------------------------------------------------------------- Dependency : google.golang.org/protobuf -Version: v1.31.0 +Version: v1.32.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/protobuf@v1.31.0/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/protobuf@v1.32.0/LICENSE: Copyright (c) 2018 The Go Authors. All rights reserved. diff --git a/docs/devguide/testing.asciidoc b/docs/devguide/testing.asciidoc index 49d2366c920a..9488fe47dcee 100644 --- a/docs/devguide/testing.asciidoc +++ b/docs/devguide/testing.asciidoc @@ -50,11 +50,11 @@ In Metricbeat, run the command from within a module like this: `go test --tags i A note about tags: the `--data` flag is a custom flag added by Metricbeat and Packetbeat frameworks. It will not be present in case tags do not match, as the relevant code will not be run and silently skipped (without the tag the test file is ignored by Go compiler so the framework doesn't load). 
This may happen if there are different tags in the build tags of the metricset under test (i.e. the GCP billing metricset requires the `billing` tag too). -==== Running Python Tests +==== Running System (integration) Tests (Python and Go) -Python system tests are defined in the `tests/system` directory. They require a testing binary to be available and the python environment to be set up. +The system tests are defined in the `tests/system` (for legacy Python test) and on `tests/integration` (for Go tests) directory. They require a testing binary to be available and the python environment to be set up. -To create the testing binary run `mage buildSystemTestBinary`. This will create the test binary in the beat directory. To setup the testing environment run `mage pythonVirtualEnv` which will create a virtual environment with all test dependencies and print its location. To activate it, the instructions depend on your operating system. See the https://packaging.python.org/en/latest/guides/installing-using-pip-and-virtual-environments/#activating-a-virtual-environment[virtualenv documentation]. +To create the testing binary run `mage buildSystemTestBinary`. This will create the test binary in the beat directory. To set up the Python testing environment run `mage pythonVirtualEnv` which will create a virtual environment with all test dependencies and print its location. To activate it, the instructions depend on your operating system. See the https://packaging.python.org/en/latest/guides/installing-using-pip-and-virtual-environments/#activating-a-virtual-environment[virtualenv documentation]. To run the system and integration tests use the `mage pythonIntegTest` target, which will start the required services using https://docs.docker.com/compose/[docker-compose] and run all integration tests. 
Similar to Go integration tests, the individual steps can be done manually to allow selecting which tests should be run: @@ -62,12 +62,16 @@ To run the system and integration tests use the `mage pythonIntegTest` target, w ---- # Create and activate the system test virtual environment (assumes a Unix system). source $(mage pythonVirtualEnv)/bin/activate + # Pull and build the containers. Only needs to be done once unless you change the containers. mage docker:composeBuild + # Bring up all containers, wait until they are healthy, and put them in the background. mage docker:composeUp + # Run all system and integration tests. INTEGRATION_TESTS=1 pytest ./tests/system + # Stop all started containers. mage docker:composeDown ---- diff --git a/go.mod b/go.mod index 1c29491fbba4..ee391fb43d20 100644 --- a/go.mod +++ b/go.mod @@ -69,7 +69,7 @@ require ( github.com/dustin/go-humanize v1.0.1 github.com/eapache/go-resiliency v1.2.0 github.com/eclipse/paho.mqtt.golang v1.3.5 - github.com/elastic/elastic-agent-client/v7 v7.6.0 + github.com/elastic/elastic-agent-client/v7 v7.8.0 github.com/elastic/go-concert v0.2.0 github.com/elastic/go-libaudit/v2 v2.5.0 github.com/elastic/go-licenser v0.4.1 @@ -164,7 +164,7 @@ require ( google.golang.org/api v0.128.0 google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13 // indirect google.golang.org/grpc v1.58.3 - google.golang.org/protobuf v1.31.0 + google.golang.org/protobuf v1.32.0 gopkg.in/inf.v0 v0.9.1 gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect diff --git a/go.sum b/go.sum index 039364a70a4d..52051e7f4cf6 100644 --- a/go.sum +++ b/go.sum @@ -663,8 +663,8 @@ github.com/elastic/ebpfevents v0.3.2 h1:UJ8kW5jw2TpUR5MEMaZ1O62sK9JQ+5xTlj+YpQC6 github.com/elastic/ebpfevents v0.3.2/go.mod h1:o21z5xup/9dK8u0Hg9bZRflSqqj1Zu5h2dg2hSTcUPQ= github.com/elastic/elastic-agent-autodiscover v0.6.7 h1:+KVjltN0rPsBrU8b156gV4lOTBgG/vt0efFCFARrf3g= github.com/elastic/elastic-agent-autodiscover 
v0.6.7/go.mod h1:hFeFqneS2r4jD0/QzGkrNk0YVdN0JGh7lCWdsH7zcI4= -github.com/elastic/elastic-agent-client/v7 v7.6.0 h1:FEn6FjzynW4TIQo5G096Tr7xYK/P5LY9cSS6wRbXZTc= -github.com/elastic/elastic-agent-client/v7 v7.6.0/go.mod h1:GlUKrbVd/O1CRAZonpBeN3J0RlVqP6VGcrBjFWca+aM= +github.com/elastic/elastic-agent-client/v7 v7.8.0 h1:GHFzDJIWpdgI0qDk5EcqbQJGvwTsl2E2vQK3/xe+MYQ= +github.com/elastic/elastic-agent-client/v7 v7.8.0/go.mod h1:ihtjqJzYiIltlRhNruaSSc0ogxIhqPD5hOMKq16cI1s= github.com/elastic/elastic-agent-libs v0.7.5 h1:4UMqB3BREvhwecYTs/L23oQp1hs/XUkcunPlmTZn5yg= github.com/elastic/elastic-agent-libs v0.7.5/go.mod h1:pGMj5myawdqu+xE+WKvM5FQzKQ/MonikkWOzoFTJxaU= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 h1:sb+25XJn/JcC9/VL8HX4r4QXSUq4uTNzGS2kxOE7u1U= @@ -2656,8 +2656,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index 7a70b1a55baa..4b7470b1dbd5 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ 
-838,10 +838,25 @@ func (b *Beat) configure(settings Settings) error { } // initialize config manager - b.Manager, err = management.NewManager(b.Config.Management, reload.RegisterV2) + m, err := management.NewManager(b.Config.Management, reload.RegisterV2) if err != nil { return err } + b.Manager = m + + if b.Manager.AgentInfo().Version != "" { + // During the manager initialization the client to connect to the agent is + // also initialized. That makes the beat to read information sent by the + // agent, which includes the AgentInfo with the agent's package version. + // Components running under agent should report the agent's package version + // as their own version. + // In order to do so b.Info.Version needs to be set to the version the agent + // sent. As this Beat instance is initialized much before the package + // version is received, it's overridden here. So far it's early enough for + // the whole beat to report the right version. + b.Info.Version = b.Manager.AgentInfo().Version + version.SetPackageVersion(b.Info.Version) + } if err := b.Manager.CheckRawConfig(b.RawConfig); err != nil { return err @@ -1521,13 +1536,13 @@ func (bc *beatConfig) Validate() error { if bc.Pipeline.Queue.IsSet() && outputPC.Queue.IsSet() { return fmt.Errorf("top level queue and output level queue settings defined, only one is allowed") } - //elastic-agent doesn't support disk queue yet + // elastic-agent doesn't support disk queue yet if bc.Management.Enabled() && outputPC.Queue.Config().Enabled() && outputPC.Queue.Name() == diskqueue.QueueType { return fmt.Errorf("disk queue is not supported when management is enabled") } } - //elastic-agent doesn't support disk queue yet + // elastic-agent doesn't support disk queue yet if bc.Management.Enabled() && bc.Pipeline.Queue.Config().Enabled() && bc.Pipeline.Queue.Name() == diskqueue.QueueType { return fmt.Errorf("disk queue is not supported when management is enabled") } diff --git a/libbeat/management/management.go 
b/libbeat/management/management.go index 88faa48f5408..177642b33988 100644 --- a/libbeat/management/management.go +++ b/libbeat/management/management.go @@ -82,9 +82,12 @@ type Manager interface { // // Calls to 'CheckRawConfig()' or 'SetPayload()' will be ignored after calling stop. // - // Note: Stop will not call 'UnregisterAction()' automaticallty. + // Note: Stop will not call 'UnregisterAction()' automatically. Stop() + // AgentInfo returns the information of the agent to which the manager is connected. + AgentInfo() client.AgentInfo + // SetStopCallback accepts a function that need to be called when the manager want to shutdown the // beats. This is needed when you want your beats to be gracefully shutdown remotely by the Elastic Agent // when a policy doesn't need to run this beat. @@ -190,6 +193,7 @@ func (n *fallbackManager) Stop() { // but that does not mean the Beat is being managed externally, // hence it will always return false. func (n *fallbackManager) Enabled() bool { return false } +func (n *fallbackManager) AgentInfo() client.AgentInfo { return client.AgentInfo{} } func (n *fallbackManager) Start() error { return nil } func (n *fallbackManager) CheckRawConfig(cfg *config.C) error { return nil } func (n *fallbackManager) RegisterAction(action client.Action) {} diff --git a/libbeat/tests/integration/cmd_keystore_test.go b/libbeat/tests/integration/cmd_keystore_test.go index eb4b697cafa2..efb9b91a1c92 100644 --- a/libbeat/tests/integration/cmd_keystore_test.go +++ b/libbeat/tests/integration/cmd_keystore_test.go @@ -100,19 +100,23 @@ func TestKeystoreRemoveMultipleExistingKeys(t *testing.T) { mockbeat.Stop() mockbeat.Start("keystore", "add", "key1", "--stdin") - fmt.Fprintf(os.Stdin, "pass1") + + fmt.Fprintf(mockbeat.stdin, "pass1") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err := mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") 
mockbeat.Start("keystore", "add", "key2", "--stdin") - fmt.Fprintf(os.Stdin, "pass2") + fmt.Fprintf(mockbeat.stdin, "pass2") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err = mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") mockbeat.Start("keystore", "add", "key3", "--stdin") - fmt.Fprintf(os.Stdin, "pass3") + fmt.Fprintf(mockbeat.stdin, "pass3") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err = mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") @@ -138,19 +142,22 @@ func TestKeystoreList(t *testing.T) { mockbeat.Stop() mockbeat.Start("keystore", "add", "key1", "--stdin") - fmt.Fprintf(os.Stdin, "pass1") + fmt.Fprintf(mockbeat.stdin, "pass1") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err := mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") mockbeat.Start("keystore", "add", "key2", "--stdin") - fmt.Fprintf(os.Stdin, "pass2") + fmt.Fprintf(mockbeat.stdin, "pass2") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err = mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") mockbeat.Start("keystore", "add", "key3", "--stdin") - fmt.Fprintf(os.Stdin, "pass3") + fmt.Fprintf(mockbeat.stdin, "pass3") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err = mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") @@ -186,7 +193,8 @@ func TestKeystoreAddSecretFromStdin(t *testing.T) { require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") mockbeat.Start("keystore", "add", "key1", "--stdin") - fmt.Fprintf(os.Stdin, "pass1") + fmt.Fprintf(mockbeat.stdin, 
"pass1") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err = mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") @@ -202,13 +210,15 @@ func TestKeystoreUpdateForce(t *testing.T) { require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") mockbeat.Start("keystore", "add", "key1", "--stdin") - fmt.Fprintf(os.Stdin, "pass1") + fmt.Fprintf(mockbeat.stdin, "pass1") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err = mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") mockbeat.Start("keystore", "add", "key1", "--force", "--stdin") - fmt.Fprintf(os.Stdin, "pass2") + fmt.Fprintf(mockbeat.stdin, "pass2") + require.NoError(t, mockbeat.stdin.Close(), "could not close mockbeat stdin") procState, err = mockbeat.Process.Wait() require.NoError(t, err) require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") diff --git a/libbeat/tests/integration/framework.go b/libbeat/tests/integration/framework.go index 046c578d7cd7..9657fbaeaff4 100644 --- a/libbeat/tests/integration/framework.go +++ b/libbeat/tests/integration/framework.go @@ -30,6 +30,7 @@ import ( "net/http" "net/url" "os" + "os/exec" "path/filepath" "regexp" "strings" @@ -55,6 +56,7 @@ type BeatProc struct { logFileOffset int64 t *testing.T tempDir string + stdin io.WriteCloser stdout *os.File stderr *os.File Process *os.Process @@ -90,7 +92,7 @@ type Total struct { Value int `json:"value"` } -// NewBeat createa a new Beat process from the system tests binary. +// NewBeat creates a new Beat process from the system tests binary. // It sets some required options like the home path, logging, etc. 
// `tempDir` will be used as home and logs directory for the Beat // `args` will be passed as CLI arguments to the Beat @@ -98,10 +100,12 @@ func NewBeat(t *testing.T, beatName, binary string, args ...string) *BeatProc { require.FileExistsf(t, binary, "beat binary must exists") tempDir := createTempDir(t) configFile := filepath.Join(tempDir, beatName+".yml") + stdoutFile, err := os.Create(filepath.Join(tempDir, "stdout")) require.NoError(t, err, "error creating stdout file") stderrFile, err := os.Create(filepath.Join(tempDir, "stderr")) require.NoError(t, err, "error creating stderr file") + p := BeatProc{ Binary: binary, baseArgs: append([]string{ @@ -213,15 +217,27 @@ func (b *BeatProc) Start(args ...string) { func (b *BeatProc) startBeat() { b.cmdMutex.Lock() defer b.cmdMutex.Unlock() + _, _ = b.stdout.Seek(0, 0) _ = b.stdout.Truncate(0) _, _ = b.stderr.Seek(0, 0) _ = b.stderr.Truncate(0) - var procAttr os.ProcAttr - procAttr.Files = []*os.File{os.Stdin, b.stdout, b.stderr} - process, err := os.StartProcess(b.fullPath, b.Args, &procAttr) + + cmd := exec.Cmd{ + Path: b.fullPath, + Args: b.Args, + Stdout: b.stdout, + Stderr: b.stderr, + } + + var err error + b.stdin, err = cmd.StdinPipe() + require.NoError(b.t, err, "could not get cmd StdinPipe") + + err = cmd.Start() require.NoError(b.t, err, "error starting beat process") - b.Process = process + + b.Process = cmd.Process } // waitBeatToExit blocks until the Beat exits, it returns @@ -515,6 +531,10 @@ func (b *BeatProc) LoadMeta() (Meta, error) { return m, nil } +func (b *BeatProc) Stdin() io.WriteCloser { + return b.stdin +} + func GetESURL(t *testing.T, scheme string) url.URL { t.Helper() diff --git a/libbeat/tests/integration/mockserver.go b/libbeat/tests/integration/mockserver.go index 0a396cb78399..763467819fa2 100644 --- a/libbeat/tests/integration/mockserver.go +++ b/libbeat/tests/integration/mockserver.go @@ -38,18 +38,18 @@ type unitKey struct { } // NewMockServer creates a GRPC server to mock the 
Elastic-Agent. -// On the first check in call it will send the first element of `unit` +// On the first check-in call it will send the first element of `unit` // as the expected unit, on successive calls, if the Beat has reached // that state, it will move on to sending the next state. // It will also validate the features. // // if `observedCallback` is not nil, it will be called on every -// check in receiving the `proto.CheckinObserved` sent by the +// check-in receiving the `proto.CheckinObserved` sent by the // Beat and index from `units` that was last sent to the Beat. // // If `delay` is not zero, when the Beat state matches the last // sent units, the server will wait for `delay` before sending the -// the next state. This will block the check in call from the Beat. +// next state. This will block the check-in call from the Beat. func NewMockServer( units [][]*proto.UnitExpected, featuresIdxs []uint64, @@ -58,7 +58,7 @@ func NewMockServer( delay time.Duration, ) *mock.StubServerV2 { i := 0 - agentInfo := &proto.CheckinAgentInfo{ + agentInfo := &proto.AgentInfo{ Id: "elastic-agent-id", Version: version.GetDefaultVersion(), Snapshot: true, diff --git a/libbeat/version/helper.go b/libbeat/version/helper.go index 5ed206d8a6c0..92b2ed2cb4cd 100644 --- a/libbeat/version/helper.go +++ b/libbeat/version/helper.go @@ -17,23 +17,36 @@ package version -import "time" +import ( + "sync/atomic" + "time" +) + +var ( + packageVersion atomic.Value + buildTime = "unknown" + commit = "unknown" + qualifier = "" +) -// GetDefaultVersion returns the current libbeat version. -// This method is in a separate file as the version.go file is auto generated +// GetDefaultVersion returns the current version. +// If running in stand-alone mode, it's the libbeat version. If running in +// managed mode, a.k.a under the agent, it's the package version set using +// SetPackageVersion. 
If SetPackageVersion haven't been called, it reports the +// libbeat version +// +// This method is in a separate file as the version.go file is auto-generated. func GetDefaultVersion() string { + if v, ok := packageVersion.Load().(string); ok && v != "" { + return v + } + if qualifier == "" { return defaultBeatVersion } return defaultBeatVersion + "-" + qualifier } -var ( - buildTime = "unknown" - commit = "unknown" - qualifier = "" -) - // BuildTime exposes the compile-time build time information. // It will represent the zero time instant if parsing fails. func BuildTime() time.Time { @@ -48,3 +61,10 @@ func BuildTime() time.Time { func Commit() string { return commit } + +// SetPackageVersion sets the package version, overriding the defaultBeatVersion. +func SetPackageVersion(version string) { + // Currently, the Elastic Agent does not perform any validation on the + // package version, therefore, no validation is done here either. + packageVersion.Store(version) +} diff --git a/testing/certutil/certutil.go b/testing/certutil/certutil.go new file mode 100644 index 000000000000..422bf4969d43 --- /dev/null +++ b/testing/certutil/certutil.go @@ -0,0 +1,186 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package certutil + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "time" +) + +// TODO: move it to a more generic place. Probably elastic-agent-client. +// Moving it to the agent-client would allow to have a mock.StubServerV2 with +// TLS out of the box. With that, we could also remove the +// `management.insecure_grpc_url_for_testing` flag from the beats. +// This can also be expanded to save the certificates and keys to disk, making +// an tool for us to generate certificates whenever we need. + +// NewRootCA generates a new x509 Certificate and returns: +// - the private key +// - the certificate +// - the certificate in PEM format as a byte slice. +// +// If any error occurs during the generation process, a non-nil error is returned. +func NewRootCA() (*ecdsa.PrivateKey, *x509.Certificate, []byte, error) { + rootKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not create private key: %w", err) + } + + notBefore := time.Now() + notAfter := notBefore.Add(3 * time.Hour) + + rootTemplate := x509.Certificate{ + DNSNames: []string{"localhost"}, + SerialNumber: big.NewInt(1653), + Subject: pkix.Name{ + Organization: []string{"Gallifrey"}, + CommonName: "localhost", + }, + NotBefore: notBefore, + NotAfter: notAfter, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + IsCA: true, + } + + rootCertRawBytes, err := x509.CreateCertificate( + rand.Reader, &rootTemplate, &rootTemplate, &rootKey.PublicKey, rootKey) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not create CA: %w", err) + } + + rootPrivKeyDER, err := x509.MarshalECPrivateKey(rootKey) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not marshal private key: 
%w", err) + } + + // PEM private key + var rootPrivBytesOut []byte + rootPrivateKeyBuff := bytes.NewBuffer(rootPrivBytesOut) + err = pem.Encode(rootPrivateKeyBuff, &pem.Block{ + Type: "EC PRIVATE KEY", Bytes: rootPrivKeyDER}) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not pem.Encode private key: %w", err) + } + + // PEM certificate + var rootCertBytesOut []byte + rootCertPemBuff := bytes.NewBuffer(rootCertBytesOut) + err = pem.Encode(rootCertPemBuff, &pem.Block{ + Type: "CERTIFICATE", Bytes: rootCertRawBytes}) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not pem.Encode certificate: %w", err) + } + + // tls.Certificate + rootTLSCert, err := tls.X509KeyPair( + rootCertPemBuff.Bytes(), rootPrivateKeyBuff.Bytes()) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not create key pair: %w", err) + } + + rootCACert, err := x509.ParseCertificate(rootTLSCert.Certificate[0]) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not parse certificate: %w", err) + } + + return rootKey, rootCACert, rootCertPemBuff.Bytes(), nil +} + +// GenerateChildCert generates a x509 Certificate as a child of caCert and +// returns the following: +// - the certificate in PEM format as a byte slice +// - the private key in PEM format as a byte slice +// - the certificate and private key as a tls.Certificate +// +// If any error occurs during the generation process, a non-nil error is returned. 
+func GenerateChildCert(name string, caPrivKey *ecdsa.PrivateKey, caCert *x509.Certificate) ( + []byte, []byte, *tls.Certificate, error) { + + notBefore := time.Now() + notAfter := notBefore.Add(3 * time.Hour) + + certTemplate := &x509.Certificate{ + DNSNames: []string{name}, + SerialNumber: big.NewInt(1658), + Subject: pkix.Name{ + Organization: []string{"Gallifrey"}, + CommonName: name, + }, + NotBefore: notBefore, + NotAfter: notAfter, + KeyUsage: x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + } + + privateKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not create private key: %w", err) + } + + certRawBytes, err := x509.CreateCertificate( + rand.Reader, certTemplate, caCert, &privateKey.PublicKey, caPrivKey) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not create CA: %w", err) + } + + privateKeyDER, err := x509.MarshalECPrivateKey(privateKey) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not marshal private key: %w", err) + } + + // PEM private key + var privBytesOut []byte + privateKeyBuff := bytes.NewBuffer(privBytesOut) + err = pem.Encode(privateKeyBuff, &pem.Block{ + Type: "EC PRIVATE KEY", Bytes: privateKeyDER}) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not pem.Encode private key: %w", err) + } + privateKeyPemBytes := privateKeyBuff.Bytes() + + // PEM certificate + var certBytesOut []byte + certBuff := bytes.NewBuffer(certBytesOut) + err = pem.Encode(certBuff, &pem.Block{ + Type: "CERTIFICATE", Bytes: certRawBytes}) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not pem.Encode certificate: %w", err) + } + certPemBytes := certBuff.Bytes() + + // TLS Certificate + tlsCert, err := tls.X509KeyPair(certPemBytes, privateKeyPemBytes) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not create key pair: %w", err) + } + + return 
privateKeyPemBytes, certPemBytes, &tlsCert, nil +} diff --git a/x-pack/filebeat/tests/integration/managerV2_test.go b/x-pack/filebeat/tests/integration/managerV2_test.go index 3332d549fa20..b541b8d54093 100644 --- a/x-pack/filebeat/tests/integration/managerV2_test.go +++ b/x-pack/filebeat/tests/integration/managerV2_test.go @@ -7,21 +7,51 @@ package integration import ( + "bufio" + "crypto/tls" + "crypto/x509" + "encoding/json" "fmt" + "io" + "math" "os" "path/filepath" + "strings" "sync/atomic" "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + protobuf "google.golang.org/protobuf/proto" "github.com/elastic/beats/v7/libbeat/tests/integration" + "github.com/elastic/beats/v7/libbeat/version" + "github.com/elastic/beats/v7/testing/certutil" "github.com/elastic/beats/v7/x-pack/libbeat/management" "github.com/elastic/elastic-agent-client/v7/pkg/client/mock" "github.com/elastic/elastic-agent-client/v7/pkg/proto" ) +// Event is the common part of a beats event, the beats and Elastic Agent +// metadata. +type Event struct { + Metadata struct { + Version string `json:"version"` + } `json:"@metadata"` + ElasticAgent struct { + Snapshot bool `json:"snapshot"` + Version string `json:"version"` + Id string `json:"id"` + } `json:"elastic_agent"` + Agent struct { + Version string `json:"version"` + Id string `json:"id"` + } `json:"agent"` +} + // TestInputReloadUnderElasticAgent will start a Filebeat and cause the input // reload issue described on https://github.com/elastic/beats/issues/33653. 
// In short, a new input for a file needs to be started while there are still @@ -500,6 +530,208 @@ func TestRecoverFromInvalidOutputConfiguration(t *testing.T) { } } +func TestAgentPackageVersionOnStartUpInfo(t *testing.T) { + wantVersion := "8.13.0+build20131123" + + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) + + logFilePath := filepath.Join(filebeat.TempDir(), "logs-to-ingest.log") + generateLogFile(t, logFilePath) + + eventsDir := filepath.Join(filebeat.TempDir(), "ingested-events") + logLevel := proto.UnitLogLevel_INFO + units := []*proto.UnitExpected{ + { + Id: "output-file-unit", + Type: proto.UnitType_OUTPUT, + ConfigStateIdx: 1, + State: proto.State_HEALTHY, + LogLevel: logLevel, + Config: &proto.UnitExpectedConfig{ + Id: "default", + Type: "file", + Name: "events-to-file", + Source: integration.RequireNewStruct(t, + map[string]interface{}{ + "name": "events-to-file", + "type": "file", + "path": eventsDir, + }), + }, + }, + { + Id: "input-unit-1", + Type: proto.UnitType_INPUT, + ConfigStateIdx: 1, + State: proto.State_HEALTHY, + LogLevel: logLevel, + Config: &proto.UnitExpectedConfig{ + Id: "filestream-monitoring-agent", + Type: "filestream", + Name: "filestream-monitoring-agent", + Streams: []*proto.Stream{ + { + Id: "log-input-1", + Source: integration.RequireNewStruct(t, map[string]interface{}{ + "enabled": true, + "type": "log", + "paths": []interface{}{logFilePath}, + }), + }, + }, + }, + }, + } + wantAgentInfo := proto.AgentInfo{ + Id: "agent-id", + Version: wantVersion, + Snapshot: true, + } + + observedCh := make(chan *proto.CheckinObserved, 5) + server := &mock.StubServerV2{ + CheckinV2Impl: func(observed *proto.CheckinObserved) *proto.CheckinExpected { + observedCh <- observed + return &proto.CheckinExpected{ + AgentInfo: &wantAgentInfo, + Units: units, + } + }, + ActionImpl: func(response *proto.ActionResponse) error { return nil }, + } + + rootKey, rootCACert, rootCertPem, err := certutil.NewRootCA() + 
require.NoError(t, err, "could not generate root CA") + + rootCertPool := x509.NewCertPool() + ok := rootCertPool.AppendCertsFromPEM(rootCertPem) + require.Truef(t, ok, "could not append certs from PEM to cert pool") + + beatPrivKeyPem, beatCertPem, beatTLSCert, err := + certutil.GenerateChildCert("localhost", rootKey, rootCACert) + require.NoError(t, err, "could not generate child TLS certificate") + + getCert := func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { + // it's one of the child certificates. As there is only one, return it + return beatTLSCert, nil + } + + creds := credentials.NewTLS(&tls.Config{ + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: rootCertPool, + GetCertificate: getCert, + MinVersion: tls.VersionTLS12, + }) + err = server.Start(grpc.Creds(creds)) + require.NoError(t, err, "failed starting GRPC server") + t.Cleanup(server.Stop) + + filebeat.Start("-E", "management.enabled=true") + + startUpInfo := &proto.StartUpInfo{ + Addr: fmt.Sprintf("localhost:%d", server.Port), + ServerName: "localhost", + Token: "token", + CaCert: rootCertPem, + PeerCert: beatCertPem, + PeerKey: beatPrivKeyPem, + Services: []proto.ConnInfoServices{proto.ConnInfoServices_CheckinV2}, + AgentInfo: &wantAgentInfo, + } + writeStartUpInfo(t, filebeat.Stdin(), startUpInfo) + // for some reason the pipe needs to be closed for filebeat to read it. 
+ require.NoError(t, filebeat.Stdin().Close(), "failed closing stdin pipe") + + // get 1st observed + observed := <-observedCh + // drain observedCh so server won't block + go func() { + for { + <-observedCh + } + }() + + msg := strings.Builder{} + require.Eventuallyf(t, func() bool { + msg.Reset() + + _, err = os.Stat(eventsDir) + if err != nil { + fmt.Fprintf(&msg, "could not verify output directory exists: %v", + err) + return false + } + + entries, err := os.ReadDir(eventsDir) + if err != nil { + fmt.Fprintf(&msg, "failed checking output directory for files: %v", + err) + return false + } + + if len(entries) == 0 { + fmt.Fprintf(&msg, "no file found on %s", eventsDir) + return false + } + + for _, e := range entries { + if e.IsDir() { + continue + } + + i, err := e.Info() + if err != nil { + fmt.Fprintf(&msg, "could not read info of %q", e.Name()) + return false + } + if i.Size() == 0 { + fmt.Fprintf(&msg, "file %q was created, but it's still empty", + e.Name()) + return false + } + + // read one line to make sure it isn't a 1/2 written JSON + eventsFile := filepath.Join(eventsDir, e.Name()) + f, err := os.Open(eventsFile) + if err != nil { + fmt.Fprintf(&msg, "could not open file %q", eventsFile) + return false + } + + scanner := bufio.NewScanner(f) + if scanner.Scan() { + var ev Event + err := json.Unmarshal(scanner.Bytes(), &ev) + if err != nil { + fmt.Fprintf(&msg, "failed to read event from file: %v", err) + return false + } + return true + } + } + + return true + }, 30*time.Second, time.Second, "no event was produced: %s", &msg) + + assert.Equal(t, version.Commit(), observed.VersionInfo.BuildHash) + + evs := getEventsFromFileOutput[Event](t, eventsDir, 100) + for _, got := range evs { + assert.Equal(t, wantVersion, got.Metadata.Version) + + assert.Equal(t, wantAgentInfo.Id, got.ElasticAgent.Id) + assert.Equal(t, wantAgentInfo.Version, got.ElasticAgent.Version) + assert.Equal(t, wantAgentInfo.Snapshot, got.ElasticAgent.Snapshot) + + assert.Equal(t, 
wantAgentInfo.Id, got.Agent.Id) + assert.Equal(t, wantVersion, got.Agent.Version) + } +} + // generateLogFile generates a log file by appending the current // time to it every second. func generateLogFile(t *testing.T, fullPath string) { @@ -543,3 +775,52 @@ func generateLogFile(t *testing.T, fullPath string) { } }() } + +// getEventsFromFileOutput reads all events from all the files on dir. If n > 0, +// then it reads up to n events. It considers all files are ndjson, and it skips +// any directory within dir. +func getEventsFromFileOutput[E any](t *testing.T, dir string, n int) []E { + t.Helper() + + if n < 1 { + n = math.MaxInt + } + + var events []E + entries, err := os.ReadDir(dir) + require.NoError(t, err, "could not read events directory") + for _, e := range entries { + if e.IsDir() { + continue + } + f, err := os.Open(filepath.Join(dir, e.Name())) + require.NoErrorf(t, err, "could not open file %q", e.Name()) + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + var ev E + err := json.Unmarshal(scanner.Bytes(), &ev) + require.NoError(t, err, "failed to read event") + events = append(events, ev) + + if len(events) >= n { + return events + } + } + } + + return events +} + +func writeStartUpInfo(t *testing.T, w io.Writer, info *proto.StartUpInfo) { + t.Helper() + if len(info.Services) == 0 { + info.Services = []proto.ConnInfoServices{proto.ConnInfoServices_CheckinV2} + } + + infoBytes, err := protobuf.Marshal(info) + require.NoError(t, err, "failed to marshal connection information") + + _, err = w.Write(infoBytes) + require.NoError(t, err, "failed to write connection information") +} diff --git a/x-pack/libbeat/management/managerV2.go b/x-pack/libbeat/management/managerV2.go index 235325c0cbfc..71b14152c654 100644 --- a/x-pack/libbeat/management/managerV2.go +++ b/x-pack/libbeat/management/managerV2.go @@ -23,16 +23,15 @@ import ( "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/common" + 
"github.com/elastic/beats/v7/libbeat/common/reload" "github.com/elastic/beats/v7/libbeat/features" + lbmanagement "github.com/elastic/beats/v7/libbeat/management" + "github.com/elastic/beats/v7/libbeat/publisher" + "github.com/elastic/beats/v7/libbeat/version" "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent-client/v7/pkg/proto" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" - - "github.com/elastic/beats/v7/libbeat/common/reload" - lbmanagement "github.com/elastic/beats/v7/libbeat/management" - "github.com/elastic/beats/v7/libbeat/publisher" - "github.com/elastic/beats/v7/libbeat/version" ) // diagnosticHandler is a wrapper type that's a bit of a hack, the compiler won't let us send the raw unit struct, @@ -161,6 +160,13 @@ func NewV2AgentManager(config *conf.C, registry *reload.Registry) (lbmanagement. } } + versionInfo := client.VersionInfo{ + Name: "beat-v2-client", + BuildHash: version.Commit(), + Meta: map[string]string{ + "commit": version.Commit(), + "build_time": version.BuildTime().String(), + }} var agentClient client.V2 var err error if c.InsecureGRPCURLForTesting != "" && c.Enabled { @@ -168,20 +174,11 @@ func NewV2AgentManager(config *conf.C, registry *reload.Registry) (lbmanagement. 
logger.Info("Using INSECURE GRPC connection, this should be only used for testing!") agentClient = client.NewV2(c.InsecureGRPCURLForTesting, "", // Insecure connection for test, no token needed - client.VersionInfo{ - Name: "beat-v2-client-for-testing", - Version: version.GetDefaultVersion(), - }, client.WithGRPCDialOptions(grpc.WithTransportCredentials(insecure.NewCredentials()))) + versionInfo, + client.WithGRPCDialOptions(grpc.WithTransportCredentials(insecure.NewCredentials()))) } else { // Normal Elastic-Agent-Client initialisation - agentClient, _, err = client.NewV2FromReader(os.Stdin, client.VersionInfo{ - Name: "beat-v2-client", - Version: version.GetDefaultVersion(), - Meta: map[string]string{ - "commit": version.Commit(), - "build_time": version.BuildTime().String(), - }, - }) + agentClient, _, err = client.NewV2FromReader(os.Stdin, versionInfo) if err != nil { return nil, fmt.Errorf("error reading control config from agent: %w", err) } @@ -231,6 +228,14 @@ func NewV2AgentManagerWithClient(config *Config, registry *reload.Registry, agen // Beats central management interface implementation // ================================ +func (cm *BeatV2Manager) AgentInfo() client.AgentInfo { + if cm.client.AgentInfo() == nil { + return client.AgentInfo{} + } + + return *cm.client.AgentInfo() +} + // RegisterDiagnosticHook will register a diagnostic callback function when elastic-agent asks for a diagnostics dump func (cm *BeatV2Manager) RegisterDiagnosticHook(name string, description string, filename string, contentType string, hook client.DiagnosticHook) { cm.client.RegisterDiagnosticHook(name, description, filename, contentType, hook) diff --git a/x-pack/libbeat/management/managerV2_test.go b/x-pack/libbeat/management/managerV2_test.go index ea67fdd89f40..66ca7f17966c 100644 --- a/x-pack/libbeat/management/managerV2_test.go +++ b/x-pack/libbeat/management/managerV2_test.go @@ -204,8 +204,7 @@ func TestManagerV2(t *testing.T) { defer srv.Stop() client := 
client.NewV2(fmt.Sprintf(":%d", srv.Port), "", client.VersionInfo{ - Name: "program", - Version: "v1.0.0", + Name: "program", Meta: map[string]string{ "key": "value", }, diff --git a/x-pack/libbeat/management/tests/mock_server.go b/x-pack/libbeat/management/tests/mock_server.go index 8671b1242339..a90ae633885d 100644 --- a/x-pack/libbeat/management/tests/mock_server.go +++ b/x-pack/libbeat/management/tests/mock_server.go @@ -31,7 +31,7 @@ func NewMockServer(t *testing.T, canStop func(string) bool, inputConfig *proto.U unitOutID := mock.NewID() token := mock.NewID() - //var gotConfig bool + // var gotConfig bool var mut sync.Mutex @@ -98,8 +98,7 @@ func NewMockServer(t *testing.T, canStop func(string) bool, inputConfig *proto.U require.NoError(t, err) client := client.NewV2(fmt.Sprintf(":%d", srv.Port), token, client.VersionInfo{ - Name: "program", - Version: "v1.0.0", + Name: "program", Meta: map[string]string{ "key": "value", }, @@ -111,7 +110,7 @@ func NewMockServer(t *testing.T, canStop func(string) bool, inputConfig *proto.U // helper to wrap the CheckinExpected config we need with every refresh of the mock server func sendUnitsWithState(state proto.State, input, output *proto.UnitExpectedConfig, inId, outId string, stateIndex uint64) *proto.CheckinExpected { return &proto.CheckinExpected{ - AgentInfo: &proto.CheckinAgentInfo{ + AgentInfo: &proto.AgentInfo{ Id: "test-agent", Version: "8.4.0", Snapshot: true, From 12b73b7543fe0bd805be25b08a3b2dec3cb47ae6 Mon Sep 17 00:00:00 2001 From: Florian Zipperle Date: Mon, 5 Feb 2024 16:46:38 +0100 Subject: [PATCH 114/129] Fix Stringer implementation of fingerprint processor (#36468) * Fix Stringer implementation of fingerprint processor * Fix the linter issues * Tests: use require instead of assert. 
--------- Co-authored-by: Pierre HILBERT --- CHANGELOG-developer.next.asciidoc | 1 + libbeat/processors/fingerprint/config.go | 25 ++++++++++++++---- libbeat/processors/fingerprint/encode.go | 20 ++++++++++---- libbeat/processors/fingerprint/fingerprint.go | 7 +++-- .../fingerprint/fingerprint_test.go | 20 +++++++++++--- libbeat/processors/fingerprint/hash.go | 26 +++++++++++++------ 6 files changed, 74 insertions(+), 25 deletions(-) diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc index 4e650a193d16..d27a957b0f3e 100644 --- a/CHANGELOG-developer.next.asciidoc +++ b/CHANGELOG-developer.next.asciidoc @@ -87,6 +87,7 @@ The list below covers the major changes between 7.0.0-rc2 and main only. - Fix ingest pipeline for panw module to parse url scheme correctly {pull}35757[35757] - Renamed an httpjson input metric to follow naming conventions. `httpjson_interval_pages_total` was renamed to `httpjson_interval_pages` because the `_total` suffix is reserved for counters. {issue}35933[35933] {pull}36169[36169] - Fixed some race conditions in tests {pull}36185[36185] +- Fix Stringer implementation of fingerprint processor {issue}35174[35174] - Re-enable HTTPJSON fixed flakey test. {issue}34929[34929] {pull}36525[36525] - Make winlogbeat/sys/wineventlog follow the unsafe.Pointer rules. {pull}36650[36650] - Cleaned up documentation errors & fixed a minor bug in Filebeat Azure blob storage input. {pull}36714[36714] diff --git a/libbeat/processors/fingerprint/config.go b/libbeat/processors/fingerprint/config.go index dc36b6bceffb..2f31691e7414 100644 --- a/libbeat/processors/fingerprint/config.go +++ b/libbeat/processors/fingerprint/config.go @@ -17,13 +17,15 @@ package fingerprint +import "encoding/json" + // Config for fingerprint processor. 
type Config struct { - Method hashMethod `config:"method"` // Hash function to use for fingerprinting - Fields []string `config:"fields" validate:"required"` // Source fields to compute fingerprint from - TargetField string `config:"target_field"` // Target field for the fingerprint - Encoding encodingMethod `config:"encoding"` // Encoding to use for target field value - IgnoreMissing bool `config:"ignore_missing"` // Ignore missing fields? + Method namedHashMethod `config:"method"` // Hash function to use for fingerprinting + Fields []string `config:"fields" validate:"required"` // Source fields to compute fingerprint from + TargetField string `config:"target_field"` // Target field for the fingerprint + Encoding namedEncodingMethod `config:"encoding"` // Encoding to use for target field value + IgnoreMissing bool `config:"ignore_missing"` // Ignore missing fields? } func defaultConfig() Config { @@ -34,3 +36,16 @@ func defaultConfig() Config { IgnoreMissing: false, } } + +func (c *Config) MarshalJSON() ([]byte, error) { + type Alias Config + return json.Marshal(&struct { + Method string + Encoding string + *Alias + }{ + Method: c.Method.Name, + Encoding: c.Encoding.Name, + Alias: (*Alias)(c), + }) +} diff --git a/libbeat/processors/fingerprint/encode.go b/libbeat/processors/fingerprint/encode.go index 843c7bd5d293..dd04068df732 100644 --- a/libbeat/processors/fingerprint/encode.go +++ b/libbeat/processors/fingerprint/encode.go @@ -24,16 +24,26 @@ import ( "strings" ) +type namedEncodingMethod struct { + Name string + Encode encodingMethod +} type encodingMethod func([]byte) string -var encodings = map[string]encodingMethod{ - "hex": hex.EncodeToString, - "base32": base32.StdEncoding.EncodeToString, - "base64": base64.StdEncoding.EncodeToString, +var encodings = map[string]namedEncodingMethod{} + +func init() { + for _, e := range []namedEncodingMethod{ + {Name: "hex", Encode: hex.EncodeToString}, + {Name: "base32", Encode: base32.StdEncoding.EncodeToString}, + 
{Name: "base64", Encode: base64.StdEncoding.EncodeToString}, + } { + encodings[e.Name] = e + } } // Unpack creates the encodingMethod from the given string -func (e *encodingMethod) Unpack(str string) error { +func (e *namedEncodingMethod) Unpack(str string) error { str = strings.ToLower(str) m, found := encodings[str] diff --git a/libbeat/processors/fingerprint/fingerprint.go b/libbeat/processors/fingerprint/fingerprint.go index 3f22082bad42..fdbcf158b27c 100644 --- a/libbeat/processors/fingerprint/fingerprint.go +++ b/libbeat/processors/fingerprint/fingerprint.go @@ -60,7 +60,7 @@ func New(cfg *config.C) (beat.Processor, error) { p := &fingerprint{ config: config, - hash: config.Method, + hash: config.Method.Hash, fields: fields, } @@ -75,7 +75,7 @@ func (p *fingerprint) Run(event *beat.Event) (*beat.Event, error) { return nil, makeErrComputeFingerprint(err) } - encodedHash := p.config.Encoding(hashFn.Sum(nil)) + encodedHash := p.config.Encoding.Encode(hashFn.Sum(nil)) if _, err := event.PutValue(p.config.TargetField, encodedHash); err != nil { return nil, makeErrComputeFingerprint(err) @@ -85,8 +85,7 @@ func (p *fingerprint) Run(event *beat.Event) (*beat.Event, error) { } func (p *fingerprint) String() string { - //nolint:staticcheck // https://github.com/elastic/beats/issues/35174 - json, _ := json.Marshal(p.config) + json, _ := json.Marshal(&p.config) return procName + "=" + string(json) } diff --git a/libbeat/processors/fingerprint/fingerprint_test.go b/libbeat/processors/fingerprint/fingerprint_test.go index ead0bc2c0055..5f6bdb70b5ed 100644 --- a/libbeat/processors/fingerprint/fingerprint_test.go +++ b/libbeat/processors/fingerprint/fingerprint_test.go @@ -18,6 +18,7 @@ package fingerprint import ( + "fmt" "math/rand" "strconv" "testing" @@ -77,6 +78,7 @@ func TestWithConfig(t *testing.T) { Fields: test.input.Clone(), } newEvent, err := p.Run(testEvent) + assert.NoError(t, err) v, err := newEvent.GetValue("fingerprint") assert.NoError(t, err) 
assert.Equal(t, test.want, v) @@ -459,6 +461,18 @@ func TestIgnoreMissing(t *testing.T) { } } +func TestProcessorStringer(t *testing.T) { + testConfig, err := config.NewConfigFrom(mapstr.M{ + "fields": []string{"field1"}, + "encoding": "hex", + "method": "md5", + }) + require.NoError(t, err) + p, err := New(testConfig) + require.NoError(t, err) + require.Equal(t, `fingerprint={"Method":"md5","Encoding":"hex","Fields":["field1"],"TargetField":"fingerprint","IgnoreMissing":false}`, fmt.Sprint(p)) +} + func BenchmarkHashMethods(b *testing.B) { events := nRandomEvents(100000) @@ -472,8 +486,8 @@ func BenchmarkHashMethods(b *testing.B) { b.Run(method, func(b *testing.B) { b.ResetTimer() - for _, e := range events { - _, err := p.Run(&e) + for i := range events { + _, err := p.Run(&events[i]) if err != nil { b.Fatal(err) } @@ -491,7 +505,7 @@ func nRandomEvents(num int) []beat.Event { charsetLen := len(charset) b := make([]byte, 200) - var events []beat.Event + events := make([]beat.Event, num) for i := 0; i < num; i++ { for j := range b { b[j] = charset[prng.Intn(charsetLen)] diff --git a/libbeat/processors/fingerprint/hash.go b/libbeat/processors/fingerprint/hash.go index 1c4af0d0161a..1c8cf146a147 100644 --- a/libbeat/processors/fingerprint/hash.go +++ b/libbeat/processors/fingerprint/hash.go @@ -28,19 +28,29 @@ import ( "github.com/cespare/xxhash/v2" ) +type namedHashMethod struct { + Name string + Hash hashMethod +} type hashMethod func() hash.Hash -var hashes = map[string]hashMethod{ - "md5": md5.New, - "sha1": sha1.New, - "sha256": sha256.New, - "sha384": sha512.New384, - "sha512": sha512.New, - "xxhash": newXxHash, +var hashes = map[string]namedHashMethod{} + +func init() { + for _, h := range []namedHashMethod{ + {Name: "md5", Hash: md5.New}, + {Name: "sha1", Hash: sha1.New}, + {Name: "sha256", Hash: sha256.New}, + {Name: "sha384", Hash: sha512.New384}, + {Name: "sha512", Hash: sha512.New}, + {Name: "xxhash", Hash: newXxHash}, + } { + hashes[h.Name] = h + } } // 
Unpack creates the hashMethod from the given string -func (f *hashMethod) Unpack(str string) error { +func (f *namedHashMethod) Unpack(str string) error { str = strings.ToLower(str) m, found := hashes[str] From 64460ba8aca9757e3cca3e797d2b26e993deab27 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 5 Feb 2024 14:45:57 -0500 Subject: [PATCH 115/129] chore: Update snapshot.yml (#37860) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index ee1b8a9c56c1..c9e2ce4a83e9 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-c876301f-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-2eea2ca0-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-c876301f-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-2eea2ca0-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-c876301f-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-2eea2ca0-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 1c9560bd8696816b54d9a528b000ac333c779cd1 Mon Sep 17 00:00:00 2001 From: 
Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Tue, 6 Feb 2024 16:20:18 +1030 Subject: [PATCH 116/129] x-pack/filebeat/input/cel: add support for pem encoded keys (#37813) This adds a new Okta auth field, jwk_pem, that allows users to specify a PEM-encoded private key for authentication. Also refactor the JSON-based code to simplify and add minimal testing. --- CHANGELOG.next.asciidoc | 1 + .../filebeat/docs/inputs/input-cel.asciidoc | 7 + x-pack/filebeat/input/cel/config_auth.go | 24 +++- x-pack/filebeat/input/cel/config_okta_auth.go | 136 ++++++++++-------- .../input/cel/config_okta_auth_test.go | 88 ++++++++++++ x-pack/filebeat/input/cel/config_test.go | 8 +- 6 files changed, 202 insertions(+), 62 deletions(-) create mode 100644 x-pack/filebeat/input/cel/config_okta_auth_test.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 5389ca6551b4..120b5d14bb1c 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -188,6 +188,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - Relax TCP/UDP metric polling expectations to improve metric collection. {pull}37714[37714] - Add support for PEM-based Okta auth in HTTPJSON. {pull}37772[37772] - Prevent complete loss of long request trace data. {issue}37826[37826] {pull}37836[37836] +- Add support for PEM-based Okta auth in CEL. {pull}37813[37813] *Auditbeat* diff --git a/x-pack/filebeat/docs/inputs/input-cel.asciidoc b/x-pack/filebeat/docs/inputs/input-cel.asciidoc index 837ea80ea1ee..b6eaa9ad744e 100644 --- a/x-pack/filebeat/docs/inputs/input-cel.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-cel.asciidoc @@ -580,6 +580,13 @@ The RSA JWK Private Key JSON for your Okta Service App which is used for interac NOTE: Only one of the credentials settings can be set at once. 
For more information please refer to https://developer.okta.com/docs/guides/implement-oauth-for-okta-serviceapp/main/ +[float] +==== `auth.oauth2.okta.jwk_pem` + +The RSA JWK private key PEM block for your Okta Service App which is used for interacting with Okta Org Auth Server to mint tokens with okta.* scopes. + +NOTE: Only one of the credentials settings can be set at once. For more information please refer to https://developer.okta.com/docs/guides/implement-oauth-for-okta-serviceapp/main/ + [[resource-parameters]] [float] ==== `resource.url` diff --git a/x-pack/filebeat/input/cel/config_auth.go b/x-pack/filebeat/input/cel/config_auth.go index e550a9635d51..d6b35d633e69 100644 --- a/x-pack/filebeat/input/cel/config_auth.go +++ b/x-pack/filebeat/input/cel/config_auth.go @@ -6,6 +6,7 @@ package cel import ( "context" + "crypto/x509" "encoding/json" "errors" "fmt" @@ -141,6 +142,7 @@ type oAuth2Config struct { // okta specific RSA JWK private key OktaJWKFile string `config:"okta.jwk_file"` OktaJWKJSON common.JSONBlob `config:"okta.jwk_json"` + OktaJWKPEM string `config:"okta.jwk_pem"` } // isEnabled returns true if the `enable` field is set to true in the yaml. 
@@ -321,8 +323,26 @@ func (o *oAuth2Config) validateGoogleProvider() error { } func (o *oAuth2Config) validateOktaProvider() error { - if o.TokenURL == "" || o.ClientID == "" || len(o.Scopes) == 0 || (o.OktaJWKJSON == nil && o.OktaJWKFile == "") { - return errors.New("okta validation error: token_url, client_id, scopes and at least one of okta.jwk_json or okta.jwk_file must be provided") + if o.TokenURL == "" || o.ClientID == "" || len(o.Scopes) == 0 { + return errors.New("okta validation error: token_url, client_id, scopes must be provided") + } + var n int + if o.OktaJWKJSON != nil { + n++ + } + if o.OktaJWKFile != "" { + n++ + } + if o.OktaJWKPEM != "" { + n++ + } + if n != 1 { + return errors.New("okta validation error: one of okta.jwk_json, okta.jwk_file or okta.jwk_pem must be provided") + } + // jwk_pem + if o.OktaJWKPEM != "" { + _, err := x509.ParsePKCS1PrivateKey([]byte(o.OktaJWKPEM)) + return err } // jwk_file if o.OktaJWKFile != "" { diff --git a/x-pack/filebeat/input/cel/config_okta_auth.go b/x-pack/filebeat/input/cel/config_okta_auth.go index cf9003dee8a1..74366afd3d5f 100644 --- a/x-pack/filebeat/input/cel/config_okta_auth.go +++ b/x-pack/filebeat/input/cel/config_okta_auth.go @@ -5,10 +5,13 @@ package cel import ( + "bytes" "context" "crypto/rsa" + "crypto/x509" "encoding/base64" "encoding/json" + "encoding/pem" "fmt" "math/big" "net/http" @@ -43,9 +46,20 @@ func (o *oAuth2Config) fetchOktaOauthClient(ctx context.Context, _ *http.Client) }, } - oktaJWT, err := generateOktaJWT(o.OktaJWKJSON, conf) - if err != nil { - return nil, fmt.Errorf("oauth2 client: error generating Okta JWT: %w", err) + var ( + oktaJWT string + err error + ) + if len(o.OktaJWKPEM) != 0 { + oktaJWT, err = generateOktaJWTPEM(o.OktaJWKPEM, conf) + if err != nil { + return nil, fmt.Errorf("oauth2 client: error generating Okta JWT PEM: %w", err) + } + } else { + oktaJWT, err = generateOktaJWT(o.OktaJWKJSON, conf) + if err != nil { + return nil, fmt.Errorf("oauth2 client: error 
generating Okta JWT: %w", err) + } } token, err := exchangeForBearerToken(ctx, oktaJWT, conf) @@ -59,14 +73,16 @@ func (o *oAuth2Config) fetchOktaOauthClient(ctx context.Context, _ *http.Client) oktaJWK: o.OktaJWKJSON, token: token, } - // reuse the tokenSource to refresh the token (automatically calls the custom Token() method when token is no longer valid). + // reuse the tokenSource to refresh the token (automatically calls + // the custom Token() method when token is no longer valid). client := oauth2.NewClient(ctx, oauth2.ReuseTokenSource(token, tokenSource)) return client, nil } -// Token implements the oauth2.TokenSource interface and helps to implement custom token refresh logic. -// Parent context is passed via the customTokenSource struct since we cannot modify the function signature here. +// Token implements the oauth2.TokenSource interface and helps to implement +// custom token refresh logic. The parent context is passed via the +// customTokenSource struct since we cannot modify the function signature here. func (ts *oktaTokenSource) Token() (*oauth2.Token, error) { ts.mu.Lock() defer ts.mu.Unlock() @@ -85,70 +101,79 @@ func (ts *oktaTokenSource) Token() (*oauth2.Token, error) { } func generateOktaJWT(oktaJWK []byte, cnf *oauth2.Config) (string, error) { - // unmarshal the JWK into a map - var jwkData map[string]string + // Unmarshal the JWK into big ints. 
+ var jwkData struct { + N base64int `json:"n"` + E base64int `json:"e"` + D base64int `json:"d"` + P base64int `json:"p"` + Q base64int `json:"q"` + Dp base64int `json:"dp"` + Dq base64int `json:"dq"` + Qinv base64int `json:"qi"` + } err := json.Unmarshal(oktaJWK, &jwkData) if err != nil { return "", fmt.Errorf("error decoding JWK: %w", err) } - // create an RSA private key from JWK components - decodeBase64 := func(key string) (*big.Int, error) { - data, err := base64.RawURLEncoding.DecodeString(jwkData[key]) - if err != nil { - return nil, fmt.Errorf("error decoding RSA JWK component %s: %w", key, err) - } - return new(big.Int).SetBytes(data), nil + // Create an RSA private key from JWK components. + key := &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: &jwkData.N.Int, + E: int(jwkData.E.Int64()), + }, + D: &jwkData.D.Int, + Primes: []*big.Int{&jwkData.P.Int, &jwkData.Q.Int}, + Precomputed: rsa.PrecomputedValues{ + Dp: &jwkData.Dp.Int, + Dq: &jwkData.Dq.Int, + Qinv: &jwkData.Qinv.Int, + }, } - n, err := decodeBase64("n") - if err != nil { - return "", err - } - e, err := decodeBase64("e") - if err != nil { - return "", err - } - d, err := decodeBase64("d") - if err != nil { - return "", err - } - p, err := decodeBase64("p") - if err != nil { - return "", err + return signJWT(cnf, key) + +} + +// base64int is a JSON decoding shim for base64-encoded big.Int. 
+type base64int struct { + big.Int +} + +func (i *base64int) UnmarshalJSON(b []byte) error { + src, ok := bytes.CutPrefix(b, []byte{'"'}) + if !ok { + return fmt.Errorf("invalid JSON type: %s", b) } - q, err := decodeBase64("q") - if err != nil { - return "", err + src, ok = bytes.CutSuffix(src, []byte{'"'}) + if !ok { + return fmt.Errorf("invalid JSON type: %s", b) } - dp, err := decodeBase64("dp") + dst := make([]byte, base64.RawURLEncoding.DecodedLen(len(src))) + _, err := base64.RawURLEncoding.Decode(dst, src) if err != nil { - return "", err + return err } - dq, err := decodeBase64("dq") - if err != nil { - return "", err + i.SetBytes(dst) + return nil +} + +func generateOktaJWTPEM(pemdata string, cnf *oauth2.Config) (string, error) { + blk, rest := pem.Decode([]byte(pemdata)) + if rest := bytes.TrimSpace(rest); len(rest) != 0 { + return "", fmt.Errorf("PEM text has trailing data: %s", rest) } - qi, err := decodeBase64("qi") + key, err := x509.ParsePKCS8PrivateKey(blk.Bytes) if err != nil { return "", err } + return signJWT(cnf, key) +} - privateKeyRSA := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: n, - E: int(e.Int64()), - }, - D: d, - Primes: []*big.Int{p, q}, - Precomputed: rsa.PrecomputedValues{ - Dp: dp, - Dq: dq, - Qinv: qi, - }, - } - - // create a JWT token using required claims and sign it with the private key +// signJWT creates a JWT token using required claims and sign it with the +// private key. +func signJWT(cnf *oauth2.Config, key any) (string, error) { now := time.Now() tok, err := jwt.NewBuilder().Audience([]string{cnf.Endpoint.TokenURL}). Issuer(cnf.ClientID). 
@@ -159,11 +184,10 @@ func generateOktaJWT(oktaJWK []byte, cnf *oauth2.Config) (string, error) { if err != nil { return "", err } - signedToken, err := jwt.Sign(tok, jwt.WithKey(jwa.RS256, privateKeyRSA)) + signedToken, err := jwt.Sign(tok, jwt.WithKey(jwa.RS256, key)) if err != nil { return "", fmt.Errorf("failed to sign token: %w", err) } - return string(signedToken), nil } diff --git a/x-pack/filebeat/input/cel/config_okta_auth_test.go b/x-pack/filebeat/input/cel/config_okta_auth_test.go new file mode 100644 index 000000000000..fc02a2ec9e79 --- /dev/null +++ b/x-pack/filebeat/input/cel/config_okta_auth_test.go @@ -0,0 +1,88 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cel + +import ( + "testing" + + "github.com/lestrrat-go/jwx/v2/jwt" + "golang.org/x/oauth2" +) + +func TestGenerateOktaJWT(t *testing.T) { + // jwt is a JWT obtained from the Okta integration. 
+ const jwtText = `{ "d": "Cmhokw2MnZfX6da36nnsnQ7IPX9vE6se8_D1NgyL9j9rarYpexhlp45hswcAIFNgWA03NV848Gc0e84AW6wMbyD2E8LPI0Bd8lhdmzRE6L4or2Rxqqjk2Pr2aqGnqs4A0uTijAA7MfPF1zFFdR3EOVx499fEeTiMcLjO83IJCoNiOySDoQgt3KofX5bCbaDy2eiB83rzf0fEcWrWfTY65_Hc2c5lek-1uuF7NpELVzX80p5H-b9MOfLn0BdOGe-mJ2j5bXi-UCQ45Wxj2jdkoA_Qwb4MEtXZjp5LjcM75SrlGfVd99acML2wGZgYLGweJ0sAPDlKzGvj4ve-JT8nNw", "p": "8-UBb4psN0wRPktkh3S48L3ng4T5zR08t7nwXDYNajROrS2j7oq60dtlGY4IwgwcC0c9GDQP7NiN2IpU2uahYkGQ7lDyM_h7UfQWL5fMrsYiKgn2pUgSy5TTT8smkSLbJAD35nAH6PknsQ2PuvOlb4laiC0MXw1Rw4vT9HAEB9M", "q": "0DJkPEN0bECG_6lorlNJgIfoNahVevGKK-Yti1YZ5K-nQCuffPCwPG0oZZo_55y5LODe9W7psxnAt7wxkpAY4lK2hpHTWJSkPjqXWFYIP8trn4RZDShnJXli0i1XqPOqkiVzBZGx5nLtj2bUtmXfIU7-kneHGvLQ5EXcyQW1ISM", "dp": "Ye1PWEPSE5ndSo_m-2RoZXE6pdocmrjkijiEQ-IIHN6HwI0Ux1C4lk5rF4mqBo_qKrUd2Lv-sPB6c7mHPKVhoxwEX0vtE-TvTwacadufeYVgblS1zcNUmJ1XAzDkeV3vc1NYNhRBeM-hmjuBvGTbxh72VLsRvpCQhd186yaW17U", "dq": "jvSK7vZCUrJb_-CLCGgX6DFpuK5FQ43mmg4K58nPLb-Oz_kkId4CpPsu6dToXFi4raAad9wYi-n68i4-u6xF6eFxgyVOQVyPCkug7_7i2ysKUxXFL8u2R3z55edMca4eSQt91y0bQmlXxUeOd0-rzms3UcrQ8igYVyXBXCaXIJE", "qi": "iIY1Y4bzMYIFG7XH7gNP7C-mWi6QH4l9aGRTzPB_gPaFThvc0XKW0S0l82bfp_PPPWg4D4QpDCp7rZ6KhEA8BlNi86Vt3V6F3Hz5XiDa4ikgQNsAXiXLqf83R-y1-cwHjW70PP3U89hmalCRRFfVXcLHV77AVHqbrp9rAIo-X-I", "kty": "RSA", "e": "AQAB", "kid": "koeFQjkyiav_3Qwr3aRinCqCD2LaEHOjFnje7XlkbdI", "n": "xloTY8bAuI5AEo8JursCd7w0LmELCae7JOFaVo9njGrG8tRNqgIdjPyoGY_ABwKkmjcCMLGMA29llFDbry8rB4LTWai-h_jX4_uUUnl52mLX-lO6merL5HEPZF438Ql9Hrxs5yGzT8n865-E_3uwYSBrhTjvlZJeXYUeVHfKo8pJSSsw3RZEjBW4Tt0eFmCZnFErtTyk3oUPaYVP-8YLLAenhUDV4Lm1dC4dxqUj0Oh6XrWgIb-eYHGolMY9g9xbgyd4ir39RodA_1DOjzHWpNfCM-J5ZOtfpuKCAe5__u7L8FT0m56XOxcDoVVsz1J1VNrACWAGbhDWNjyHfL5E2Q" }` + cnf := &oauth2.Config{ + ClientID: "0oaajljpeokFZLyKU5d7", + Scopes: []string{"okta.logs.read"}, + } + got, err := generateOktaJWT([]byte(jwtText), cnf) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + tok, err := jwt.Parse([]byte(got), jwt.WithVerify(false)) + if err != 
nil { + t.Fatalf("unexpected error: %v", err) + } + if tok.Issuer() != cnf.ClientID { + t.Errorf("unexpected issuer: got:%s want:%s", tok.Issuer(), cnf.ClientID) + } + if tok.Subject() != cnf.ClientID { + t.Errorf("unexpected subject: got:%s want:%s", tok.Subject(), cnf.ClientID) + } +} + +func TestGenerateOktaJWTPEM(t *testing.T) { + // jwtText is generated by https://mkjwk.org/ using the instructions at + // https://developer.okta.com/docs/guides/dpop/nonoktaresourceserver/main/#create-the-json-web-token + const jwtText = ` +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCOuef3HMRhohVT +5kSoAJgV+atpDjkwTwkOq+ImnbBlv75GaApG90w8VpjXjhqN/1KJmwfyrKiquiMq +OPu+o/672Dys5rUAaWSbT7wRF1GjLDDZrM0GHRdV4DGxM/LKI8I5yE1Mx3EzV+D5 +ZLmcRc5U4oEoMwtGpr0zRZ7uUr6a28UQwcUsVIPItc1/9rERlo1WTv8dcaj4ECC3 +2Sc0y/F+9XqwJvLd4Uv6ckzP0Sv4tbDA+7jpD9MneAIUiZ4LVj2cwbBd+YRY6jXx +MkevcCSmSX60clBY1cIFkw1DYHqtdHEwAQcQHLGMoi72xRP2qrdzIPsaTKVYoHVo +WA9vADdHAgMBAAECggEAIlx7jjCsztyYyeQsL05FTzUWoWo9NnYwtgmHnshkCXsK +MiUmJEOxZO1sSqj5l6oakupyFWigCspZYPbrFNCiqVK7+NxqQzkccY/WtT6p9uDS +ufUyPwCN96zMCd952lSVlBe3FH8Hr9a+YQxw60CbFjCZ67WuR0opTsi6JKJjJSDb +TQQZ4qJR97D05I1TgfmO+VO7G/0/dDaNHnnlYz0AnOgZPSyvrU2G5cYye4842EMB +ng81xjHD+xp55JNui/xYkhmYspYhrB2KlEjkKb08OInUjBeaLEAgA1r9yOHsfV/3 +DQzDPRO9iuqx5BfJhdIqUB1aifrye+sbxt9uMBtUgQKBgQDVdfO3GYT+ZycOQG9P +QtdMn6uiSddchVCGFpk331u6M6yafCKjI/MlJDl29B+8R5sVsttwo8/qnV/xd3cn +pY14HpKAsE4l6/Ciagzoj+0NqfPEDhEzbo8CyArcd7pSxt3XxECAfZe2+xivEPHe +gFO60vSFjFtvlLRMDMOmqX3kYQKBgQCrK1DISyQTnD6/axsgh2/ESOmT7n+JRMx/ +YzA7Lxu3zGzUC8/sRDa1C41t054nf5ZXJueYLDSc4kEAPddzISuCLxFiTD2FQ75P +lHWMgsEzQObDm4GPE9cdKOjoAvtAJwbvZcjDa029CDx7aCaDzbNvdmplZ7EUrznR +55U8Wsm8pwKBgBytxTmzZwfbCgdDJvFKNKzpwuCB9TpL+v6Y6Kr2Clfg+26iAPFU +MiWqUUInGGBuamqm5g6jI5sM28gQWeTsvC4IRXyes1Eq+uCHSQax15J/Y+3SSgNT +9kjUYYkvWMwoRcPobRYWSZze7XkP2L8hFJ7EGvAaZGqAWxzgliS9HtnhAoGAONZ/ +UqMw7Zoac/Ga5mhSwrj7ZvXxP6Gqzjofj+eKqrOlB5yMhIX6LJATfH6iq7cAMxxm 
+Fu/G4Ll4oB3o5wACtI3wldV/MDtYfJBtoCTjBqPsfNOsZ9hMvBATlsc2qwzKjsAb +tFhzTevoOYpSD75EcSS/G8Ec2iN9bagatBnpl00CgYBVqAOFZelNfP7dj//lpk8y +EUAw7ABOq0S9wkpFWTXIVPoBQUipm3iAUqGNPmvr/9ShdZC9xeu5AwKram4caMWJ +ExRhcDP1hFM6CdmSkIYEgBKvN9N0O4Lx1ba34gk74Hm65KXxokjJHOC0plO7c7ok +LNV/bIgMHOMoxiGrwyjAhg== +-----END PRIVATE KEY----- +` + cnf := &oauth2.Config{ + ClientID: "0oaajljpeokFZLyKU5d7", + Scopes: []string{"okta.logs.read"}, + } + got, err := generateOktaJWTPEM(jwtText, cnf) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + tok, err := jwt.Parse([]byte(got), jwt.WithVerify(false)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if tok.Issuer() != cnf.ClientID { + t.Errorf("unexpected issuer: got:%s want:%s", tok.Issuer(), cnf.ClientID) + } + if tok.Subject() != cnf.ClientID { + t.Errorf("unexpected subject: got:%s want:%s", tok.Subject(), cnf.ClientID) + } +} diff --git a/x-pack/filebeat/input/cel/config_test.go b/x-pack/filebeat/input/cel/config_test.go index 0cd404705e2d..e4c98b78dc5e 100644 --- a/x-pack/filebeat/input/cel/config_test.go +++ b/x-pack/filebeat/input/cel/config_test.go @@ -489,8 +489,8 @@ var oAuth2ValidationTests = []struct { }, }, { - name: "okta requires token_url, client_id, scopes and at least one of okta.jwk_json or okta.jwk_file to be provided", - wantErr: errors.New("okta validation error: token_url, client_id, scopes and at least one of okta.jwk_json or okta.jwk_file must be provided accessing 'auth.oauth2'"), + name: "unique_okta_jwk_token", + wantErr: errors.New("okta validation error: one of okta.jwk_json, okta.jwk_file or okta.jwk_pem must be provided accessing 'auth.oauth2'"), input: map[string]interface{}{ "auth.oauth2": map[string]interface{}{ "provider": "okta", @@ -501,7 +501,7 @@ var oAuth2ValidationTests = []struct { }, }, { - name: "okta oauth2 validation fails if jwk_json is not a valid JSON", + name: "invalid_okta_jwk_json", wantErr: errors.New("the field can't be converted to valid JSON accessing 
'auth.oauth2.okta.jwk_json'"), input: map[string]interface{}{ "auth.oauth2": map[string]interface{}{ @@ -514,7 +514,7 @@ var oAuth2ValidationTests = []struct { }, }, { - name: "okta successful oauth2 validation", + name: "okta_successful_oauth2_validation", input: map[string]interface{}{ "auth.oauth2": map[string]interface{}{ "provider": "okta", From ac6e2230bcfe766f1bd5695a59aa7d55116098f7 Mon Sep 17 00:00:00 2001 From: Christiano Haesbaert Date: Tue, 6 Feb 2024 09:24:52 +0100 Subject: [PATCH 117/129] linux capabilities: normalization and auditbeat process support (#37453) Capabilities normalization and auditbeat process support This draft implements `process.thread.capabilities.{effective,permitted}` to auditbeat/system/process and normalizes the other uses of linux capabilities across beats (only two other cases). I've tested metricbeat, auditbeat and filebeat+journald. Co-authored-by: Dan Kortschak <90160302+efd6@users.noreply.github.com> Co-authored-by: Mattia Meleleo --- CHANGELOG.next.asciidoc | 1 + auditbeat/docs/fields.asciidoc | 22 +++ .../input/journald/pkg/journalfield/conv.go | 65 +------ .../pkg/journalfield/conv_expand_test.go | 6 +- .../common/capabilities/capabilities_linux.go | 161 ++++++++++++++++++ .../capabilities/capabilities_linux_test.go | 87 ++++++++++ .../common/capabilities/capabilities_other.go | 47 +++++ libbeat/common/capabilities_linux.go | 66 ------- libbeat/common/seccomp/policy_linux_386.go | 1 + libbeat/common/seccomp/policy_linux_amd64.go | 1 + metricbeat/helper/socket/ptable_linux.go | 15 +- .../auditbeat/module/system/_meta/fields.yml | 19 +++ x-pack/auditbeat/module/system/fields.go | 2 +- .../module/system/process/process.go | 38 ++++- 14 files changed, 387 insertions(+), 144 deletions(-) create mode 100644 libbeat/common/capabilities/capabilities_linux.go create mode 100644 libbeat/common/capabilities/capabilities_linux_test.go create mode 100644 libbeat/common/capabilities/capabilities_other.go delete mode 100644 
libbeat/common/capabilities_linux.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 120b5d14bb1c..f93cde9590a2 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -144,6 +144,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d *Auditbeat* +- Add linux capabilities to processes in the system/process. {pull}37453[37453] *Filebeat* diff --git a/auditbeat/docs/fields.asciidoc b/auditbeat/docs/fields.asciidoc index bd4db4ce5b6c..9eee5f008fc1 100644 --- a/auditbeat/docs/fields.asciidoc +++ b/auditbeat/docs/fields.asciidoc @@ -18925,6 +18925,28 @@ type: keyword -- +*`process.thread.capabilities.effective`*:: ++ +-- +This is the set of capabilities used by the kernel to perform permission checks for the thread. + +type: keyword + +example: ["CAP_BPF", "CAP_SYS_ADMIN"] + +-- + +*`process.thread.capabilities.permitted`*:: ++ +-- +This is a limiting superset for the effective capabilities that the thread may assume. + +type: keyword + +example: ["CAP_BPF", "CAP_SYS_ADMIN"] + +-- + [float] === hash diff --git a/filebeat/input/journald/pkg/journalfield/conv.go b/filebeat/input/journald/pkg/journalfield/conv.go index bd7403ae142f..94447b773b7e 100644 --- a/filebeat/input/journald/pkg/journalfield/conv.go +++ b/filebeat/input/journald/pkg/journalfield/conv.go @@ -19,11 +19,11 @@ package journalfield import ( "fmt" - "math/bits" "regexp" "strconv" "strings" + "github.com/elastic/beats/v7/libbeat/common/capabilities" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -190,72 +190,13 @@ func expandCapabilities(fields mapstr.M) { if !ok { return } - w, err := strconv.ParseUint(c, 16, 64) - if err != nil { - return - } - if w == 0 { + caps, err := capabilities.FromString(c, 16) + if err != nil || len(caps) == 0 { return } - caps := make([]string, 0, bits.OnesCount64(w)) - for i := 0; w != 0; i++ { - if w&1 != 0 { - if i < len(capTable) { - caps = append(caps, 
capTable[i]) - } else { - caps = append(caps, strconv.Itoa(i)) - } - } - w >>= 1 - } fields.Put("process.thread.capabilities.effective", caps) } -// include/uapi/linux/capability.h -var capTable = [...]string{ - 0: "CAP_CHOWN", - 1: "CAP_DAC_OVERRIDE", - 2: "CAP_DAC_READ_SEARCH", - 3: "CAP_FOWNER", - 4: "CAP_FSETID", - 5: "CAP_KILL", - 6: "CAP_SETGID", - 7: "CAP_SETUID", - 8: "CAP_SETPCAP", - 9: "CAP_LINUX_IMMUTABLE", - 10: "CAP_NET_BIND_SERVICE", - 11: "CAP_NET_BROADCAST", - 12: "CAP_NET_ADMIN", - 13: "CAP_NET_RAW", - 14: "CAP_IPC_LOCK", - 15: "CAP_IPC_OWNER", - 16: "CAP_SYS_MODULE", - 17: "CAP_SYS_RAWIO", - 18: "CAP_SYS_CHROOT", - 19: "CAP_SYS_PTRACE", - 20: "CAP_SYS_PACCT", - 21: "CAP_SYS_ADMIN", - 22: "CAP_SYS_BOOT", - 23: "CAP_SYS_NICE", - 24: "CAP_SYS_RESOURCE", - 25: "CAP_SYS_TIME", - 26: "CAP_SYS_TTY_CONFIG", - 27: "CAP_MKNOD", - 28: "CAP_LEASE", - 29: "CAP_AUDIT_WRITE", - 30: "CAP_AUDIT_CONTROL", - 31: "CAP_SETFCAP", - 32: "CAP_MAC_OVERRIDE", - 33: "CAP_MAC_ADMIN", - 34: "CAP_SYSLOG", - 35: "CAP_WAKE_ALARM", - 36: "CAP_BLOCK_SUSPEND", - 37: "CAP_AUDIT_READ", - 38: "CAP_PERFMON", - 39: "CAP_BPF", - 40: "CAP_CHECKPOINT_RESTORE", -} - func getStringFromFields(key string, fields mapstr.M) string { value, _ := fields.GetValue(key) str, _ := value.(string) diff --git a/filebeat/input/journald/pkg/journalfield/conv_expand_test.go b/filebeat/input/journald/pkg/journalfield/conv_expand_test.go index c43e57a1c494..09daf7c8f5b6 100644 --- a/filebeat/input/journald/pkg/journalfield/conv_expand_test.go +++ b/filebeat/input/journald/pkg/journalfield/conv_expand_test.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. 
+//go:build linux && cgo + package journalfield import ( @@ -228,8 +230,8 @@ var expandCapabilitiesTests = []struct { "CAP_PERFMON", "CAP_BPF", "CAP_CHECKPOINT_RESTORE", - "41", - "42", + "CAP_41", + "CAP_42", }, }, }, diff --git a/libbeat/common/capabilities/capabilities_linux.go b/libbeat/common/capabilities/capabilities_linux.go new file mode 100644 index 000000000000..715b86d9bc7e --- /dev/null +++ b/libbeat/common/capabilities/capabilities_linux.go @@ -0,0 +1,161 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux + +package capabilities + +import ( + "errors" + "math/bits" + "strconv" + "strings" + + "kernel.org/pub/linux/libs/security/libcap/cap" +) + +var ( + // errInvalidCapability expresses an invalid capability ID: x < 0 || x >= 64. + errInvalidCapability = errors.New("invalid capability") +) + +// The capability set flag/vector, re-exported from +// libcap(3). Inherit, Bound & Ambient not exported since we have no +// use for it yet. +type Flag = cap.Flag + +const ( + // aka CapEff + Effective = cap.Effective + // aka CapPrm + Permitted = cap.Permitted +) + +// Fetch the capabilities of pid for a given flag/vector and convert +// it to the representation used in ECS. 
cap.GetPID() fetches it with +// SYS_CAPGET. +// Returns errors.ErrUnsupported on "not linux". +func FromPid(flag Flag, pid int) ([]string, error) { + set, err := cap.GetPID(pid) + if err != nil { + return nil, err + } + empty, err := isEmpty(flag, set) + if err != nil { + return nil, err + } + if empty { + return []string{}, nil + } + + sl := make([]string, 0, cap.MaxBits()) + for i := 0; i < int(cap.MaxBits()); i++ { + c := cap.Value(i) + enabled, err := set.GetFlag(flag, c) + if err != nil { + return nil, err + } + if !enabled { + continue + } + s, err := toECS(i) + // impossible since MaxBits <= 64 + if err != nil { + return nil, err + } + sl = append(sl, s) + } + + return sl, err +} + +// Convert a uint64 to the capabilities representation used in ECS. +// Returns errors.ErrUnsupported on "not linux". +func FromUint64(w uint64) ([]string, error) { + sl := make([]string, 0, bits.OnesCount64(w)) + for i := 0; w != 0; i++ { + if w&1 != 0 { + s, err := toECS(i) + // impossible since MaxBits <= 64 + if err != nil { + return nil, err + } + sl = append(sl, s) + } + w >>= 1 + } + + return sl, nil +} + +// Convert a string to the capabilities representation used in +// ECS. Example input: "1ffffffffff", 16. +// Returns errors.ErrUnsupported on "not linux". +func FromString(s string, base int) ([]string, error) { + w, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return nil, err + } + + return FromUint64(w) +} + +// True if sets are equal for the given flag/vector, errors out in +// case any of the sets is malformed. +func isEqual(flag Flag, a *cap.Set, b *cap.Set) (bool, error) { + d, err := a.Cf(b) + if err != nil { + return false, err + } + + return !d.Has(flag), nil +} + +// Convert the capability ID to a string suitable to be used in +// ECS. +// If capabiliy ID X is unknown, but valid (0 <= X < 64), "CAP_X" +// will be returned instead. Fetches from an internal table built at +// startup. 
+var toECS = makeToECS() + +// Make toECS() which creates a map of every possible valid capability +// ID on startup. Returns errInvalidCapabilty for an invalid ID. +func makeToECS() func(int) (string, error) { + ecsNames := make(map[int]string) + + for i := 0; i < 64; i++ { + c := cap.Value(i) + if i < int(cap.MaxBits()) { + ecsNames[i] = strings.ToUpper(c.String()) + } else { + ecsNames[i] = strings.ToUpper("CAP_" + c.String()) + } + } + + return func(b int) (string, error) { + s, ok := ecsNames[b] + if !ok { + return "", errInvalidCapability + } + return s, nil + } +} + +// Like isAll(), but for the empty set, here for symmetry. +func isEmpty(flag Flag, set *cap.Set) (bool, error) { + return isEqual(flag, set, cap.NewSet()) +} diff --git a/libbeat/common/capabilities/capabilities_linux_test.go b/libbeat/common/capabilities/capabilities_linux_test.go new file mode 100644 index 000000000000..1481fc5679b2 --- /dev/null +++ b/libbeat/common/capabilities/capabilities_linux_test.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package capabilities + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + "kernel.org/pub/linux/libs/security/libcap/cap" +) + +func TestEmpty(t *testing.T) { + sl, err := FromString("0", 16) + assert.Nil(t, err) + assert.Equal(t, len(sl), 0) + + sl, err = FromUint64(0) + assert.Nil(t, err) + assert.Equal(t, len(sl), 0) + + // assumes non root has no capabilities + if os.Geteuid() != 0 { + empty := cap.NewSet() + self := cap.GetProc() + d, err := self.Cf(empty) + assert.Nil(t, err) + assert.False(t, d.Has(cap.Effective)) + assert.False(t, d.Has(cap.Permitted)) + assert.False(t, d.Has(cap.Inheritable)) + } +} + +func TestOverflow(t *testing.T) { + sl, err := FromUint64(^uint64(0)) + assert.Nil(t, err) + assert.Equal(t, len(sl), 64) + + for _, cap := range []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_IPC_LOCK", + "CAP_MAC_OVERRIDE", + } { + assertHasCap(t, sl, cap) + } + if cap.MaxBits() <= 62 { + assertHasCap(t, sl, "CAP_62") + } + if cap.MaxBits() <= 63 { + assertHasCap(t, sl, "CAP_63") + } +} + +func assertHasCap(t *testing.T, sl []string, s string) { + var found int + + for _, s2 := range sl { + if s2 == s { + found++ + } + } + + assert.Equal(t, found, 1, s) +} diff --git a/libbeat/common/capabilities/capabilities_other.go b/libbeat/common/capabilities/capabilities_other.go new file mode 100644 index 000000000000..fbd7e8797728 --- /dev/null +++ b/libbeat/common/capabilities/capabilities_other.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build !linux + +package capabilities + +import "errors" + +// Dummy value on "not linux". +type Flag = uint + +const ( + // Meaningless on "not linux". + Effective = Flag(0) + // Meaningless on "not linux". + Permitted = Flag(1) +) + +// Returns errors.ErrUnsupported on "not linux". +func FromPid(flag Flag, pid int) ([]string, error) { + return nil, errors.ErrUnsupported +} + +// Returns errors.ErrUnsupported on "not linux". +func FromUint64(w uint64) ([]string, error) { + return nil, errors.ErrUnsupported +} + +// Returns errors.ErrUnsupported on "not linux". +func FromString(s string, base int) ([]string, error) { + return nil, errors.ErrUnsupported +} diff --git a/libbeat/common/capabilities_linux.go b/libbeat/common/capabilities_linux.go deleted file mode 100644 index b2992c251ef8..000000000000 --- a/libbeat/common/capabilities_linux.go +++ /dev/null @@ -1,66 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -//go:build linux - -package common - -import ( - "errors" - "fmt" - - "github.com/elastic/go-sysinfo" - "github.com/elastic/go-sysinfo/types" -) - -// Capabilities contains the capability sets of a process -type Capabilities types.CapabilityInfo - -// Check performs a permission check for a given capabilities set -func (c Capabilities) Check(set []string) bool { - for _, capability := range set { - found := false - for _, effective := range c.Effective { - if capability == effective { - found = true - break - } - } - if !found { - return false - } - } - return true -} - -// GetCapabilities gets the capabilities of this process -func GetCapabilities() (Capabilities, error) { - p, err := sysinfo.Self() - if err != nil { - return Capabilities{}, fmt.Errorf("failed to read self process information: %w", err) - } - - if c, ok := p.(types.Capabilities); ok { - capabilities, err := c.Capabilities() - if err != nil { - return Capabilities{}, fmt.Errorf("failed to read process capabilities: %w", err) - } - return Capabilities(*capabilities), nil - } - - return Capabilities{}, errors.New("capabilities not available") -} diff --git a/libbeat/common/seccomp/policy_linux_386.go b/libbeat/common/seccomp/policy_linux_386.go index 724666987201..ac2a93a5c741 100644 --- a/libbeat/common/seccomp/policy_linux_386.go +++ b/libbeat/common/seccomp/policy_linux_386.go @@ -31,6 +31,7 @@ func init() { "_llseek", "access", "brk", + "capget", "chmod", "chown", "clock_gettime", diff --git a/libbeat/common/seccomp/policy_linux_amd64.go 
b/libbeat/common/seccomp/policy_linux_amd64.go index 0a05bdde9275..624f48c890a2 100644 --- a/libbeat/common/seccomp/policy_linux_amd64.go +++ b/libbeat/common/seccomp/policy_linux_amd64.go @@ -34,6 +34,7 @@ func init() { "arch_prctl", "bind", "brk", + "capget", "chmod", "chown", "clock_gettime", diff --git a/metricbeat/helper/socket/ptable_linux.go b/metricbeat/helper/socket/ptable_linux.go index 88fff488bc22..ffe585f70949 100644 --- a/metricbeat/helper/socket/ptable_linux.go +++ b/metricbeat/helper/socket/ptable_linux.go @@ -20,17 +20,22 @@ package socket import ( - "github.com/elastic/beats/v7/libbeat/common" + "kernel.org/pub/linux/libs/security/libcap/cap" ) -var requiredCapabilities = []string{"sys_ptrace", "dac_read_search"} - // isPrivileged checks if this process has privileges to read sockets // of all users func isPrivileged() (bool, error) { - capabilities, err := common.GetCapabilities() + set := cap.GetProc() + + ptrace, err := set.GetFlag(cap.Effective, cap.SYS_PTRACE) + if err != nil { + return false, err + } + dac_read_search, err := set.GetFlag(cap.Effective, cap.DAC_READ_SEARCH) if err != nil { return false, err } - return capabilities.Check(requiredCapabilities), nil + + return ptrace && dac_read_search, nil } diff --git a/x-pack/auditbeat/module/system/_meta/fields.yml b/x-pack/auditbeat/module/system/_meta/fields.yml index 61908a6ce292..43101839c0a1 100644 --- a/x-pack/auditbeat/module/system/_meta/fields.yml +++ b/x-pack/auditbeat/module/system/_meta/fields.yml @@ -30,6 +30,25 @@ - name: process type: group fields: + - name: thread.capabilities.effective + level: extended + type: keyword + ignore_above: 1024 + description: This is the set of capabilities used by the kernel to perform permission + checks for the thread. 
+ example: '["CAP_BPF", "CAP_SYS_ADMIN"]' + pattern: ^(CAP_[A-Z_]+|\d+)$ + default_field: false + - name: thread.capabilities.permitted + level: extended + type: keyword + ignore_above: 1024 + description: This is a limiting superset for the effective capabilities that + the thread may assume. + example: '["CAP_BPF", "CAP_SYS_ADMIN"]' + pattern: ^(CAP_[A-Z_]+|\d+)$ + default_field: false + - name: hash type: group description: > diff --git a/x-pack/auditbeat/module/system/fields.go b/x-pack/auditbeat/module/system/fields.go index 7711dffe2c09..4b0a95d23b0a 100644 --- a/x-pack/auditbeat/module/system/fields.go +++ b/x-pack/auditbeat/module/system/fields.go @@ -19,5 +19,5 @@ func init() { // AssetSystem returns asset data. // This is the base64 encoded zlib format compressed contents of module/system. func AssetSystem() string { - return "eJy0WV1v2zoSffevGPSlCeAqiNsEhR8WSJtiE2y7DdYp0DebEscSNxSpS1JJ1F9/QerDkk3ZlqMroEDNkOec+eBoSH2AJyzmoAttMJ0AGGY4zuHdwg28mwBQ1JFimWFSzOFfEwCAxwQ1AlEIJkFYM+RUQ4wCFTFIISzceIkJqaQ5x2ACoJAj0TiHEA2ZQLVwPpkAfABBUpwDPqMwjsMUGc4hVjLP3O96sv1/PVsqFjPhhuoFT1i8SEWrMY92+/x060CunU7HGcBjwjRERECIQGDNOEJGTAJnGMQBrC6eibrgMrb/gsvV+bRBk8rBWEk1ZGV6JNNMChQGTEIM6DzLOEPqplBiSI0t0HAmnlbnQdsXuUZ1tCtQGGaKJaPDvXF/C7lgf+XIC2DUAq0LJmKn0moAKYBAIrUJ4N6A9ZJMs9xGmmggsLi7+TC7uoaE6GTjlNIRdhXc305LIPsfImj5w+oOOjYYVCkThA834bFaWdNago4vMyUj1Ppod7Zs2Z7eK+KO6AR1k1WvGOWGhBxtaqG1Q7stQ3gsFTNJ6qi0c4hd8Ex4jm5Kg+g8iK+AIpIUKVAWozbVTGfftv6NBSEnTzgLl7Or6w2ex6Nb5nz5fvOfb7OwCajHnEkP08fPn05h+vj501Cmq8vZKUxXl7NjmXRCZrNB5izubmazoy3RCRnorsXdzQBPWfzlcAvcmmEcw9Kr5Dg+txzHCZ5aDvXVwJRyHMPy6epydkJEri5nF8Ni4ngGR8XxHB+X19fkepApv39f7zWiMcC9OQOSU+bvAzzFt1sAW0VcatMM+gp5D179rCzACiIpDGGi7nB4+VJjYi1VSuy6oLVqu8epn22Nrdd8ZliKHeJSKZci7gyXhHOguXK8nT8ykeVmWU8RREiNkRRUd2bJ3LSnEX1LCu+MTGHEtHPKZefve/xln1/OGmCiLSHwmB1KaXoMp8TgEM4vUhqwWD6eKnqo2B+kHrJQSo5EDOFboAG2rtLAdkANh0+AFfZHCgzsT4+A7W1zhID/tlrNGr7dcU3B9ZVfFo97Bcn1WqMJNEbHZN8BTY8bHRbVZsCe6FuV4/njrkLzMTFf0E/kgPtbHwVRUcIMRiZXIxrUga1OCq+fr5fXn859IlLii+IJ3D9uvgKhVKHW6I0
dyzxEW4MHOO4f9lNI7aHYrtwHWFZSt2p3q1wDCWVu3GaRmT2y2sNO9d7p1tudmt0uKxR3Enif1w/65OeiAZ3a8kJEUUVdG4UmSs4Dr5KME2NtG1VJDVopiFAYqaeQh7kw+RRemKDyRfcoGt0v7mhdKvlBIjvyu4d6TVLGi1HJS8iKXiFNiJkCxZARMYW1Qgw1PeSRZ1R6+4X9Vl0Vpp/wCZVAPh7fo2ezvNcVzX4plnVUw+1qONOI8O3rAqQO7EDL8c3GINETifFNHWCFsbeQEAFMaEM4RwpSgcJUPiOt+d/WHW7f6xxy4F737bvpqdUevOKpGo2dyDRXPRUSlJWMiMYTvjzprRknmvjQIvfx+HbiG6n2WFXFe0y2CrKvDxmTqt2A+Pg4i1CMa10F6W07yj02ypmhpqswew8Pmv056mR2FJkF85LkaUpUcQJgudCHmSs+Zlh+/e/7bn1t7qfbFEOKqwU42KLZSbq8gt7t0Y6vp/9UdwLwq3uZveMlto34drbuMWTDFY/L9W8by14yytTYhr3XkMgULTRGRnZTu33JhXzE3gbgQclYkRSMBJULIAa4jFlPP2MTctnK1VE9Xt0wuQ8k7Rsm+CngOxP56xRMwrR9Q9vNEWMkdZntPRmxc2aqFcrw/xiZYQJXDu5AM1SUpHrz/YhpyIgytnE4C7GQ1QePvIx4ppitYuWqrf7Zv5Nh/24+FIWjIgFN/u9ubdi75Tb0TBiMcXuXDKTv234Z0dpjXN9R+XBsa8D94W2iVs2GMyFN1UBWI8xo5OvBkfScE2CsSN7syLawATxIrVnI2x/fYKUTQuXLsvFHD+ZZx2jXGduNKcoPwA7DfUU+n258u6RMk5AjXU17UFdCbpgtR7nZKRExKplr14+LQgp036q5jIGJc9dm9yFGqshMG/QlQdENmYuN1X6BJrpwwxQ0Yqp7QI2ss8Qef1A4DnfmKRF3ot/qGok2yyixBvVvnZ12rnyOCvaj+7pedGpMbegL0U4AVAKCyd8BAAD//yDbzZE=" + return 
"eJy8Wm1v27oV/u5fcRAMaILrKotvExT+MMBt7pZg7W0wp0C3uzubEo8lLhSpkVQSFfvxA6kXSzZlW4k7A0VjmXyec57z4kPJb+EBiynoQhtMRwCGGY5TOJm7CycjAIo6UiwzTIop/GkEAHCfoEYgCsEkCCuGnGqIUaAiBimEhbteYkIqac4xGAEo5Eg0TiFEQ0ZQbZyORgBvQZAUp4CPKIzjMEWGU4iVzDP3vl5s/65XS8ViJtylesMDFk9S0eqax3b7+uL2gVw5Ox1nAPcJ0xARASECgRXjCBkxCZxiEAewPH8k6pzL2P4LLpZn4wZNKgdjTaohK9cjmWZSoDBgEmJA51nGGVK3hBJDamyBhjPxsDwL2lrkGtXBUqAwzBQLRoercXsNuWD/yZEXwKgFWhVMxM5KawNIAQQSqU0AtwasSjLNchtpooHA/Gb2dnJ5BQnRyVqUUgi7C26vxyWQ/YMIWr6xdgcdHwyqlAnCh7twX+2saS1BR8tMyQi1PlhOkygkNIhIRkLGmWGoA1ytMDLsEStajo/Ip4DPBgXFXcKzWEiFCxLKR5zCxR8n73zuuARkukwgNNaXNr91qqmtB1QCORgJGaqVVKn9P2VaMykaVaIEowcNqypBK5+qj/GZpJkt9Te/nXyc3S0+3P35ZAzuz/nf54vZ9efbX09+f1OtzogxqMQU/nVqV/w2e/uPxe8//fef9KezPzS+rEjOzcLJOYUV4Rr3auqsNqZR70dpSoCzlBmb1jrPUFl9a12auHbltiXbSLnWD1JSANE6r7P3/yVlR8tWrW2mc2+R3BCdoG663jNGuSEhR9v6bEYV2rV0wmOpmElSR6VdwdoNj4Tn6JZ0VEnwGVBEkiIFymLUploZjKp17fpaexBy8oCTcDG5vFrjeeK84c6HT7O//jIJm4bjcWfUw/Tz+3cvYfr5/buhTJcXk5cwXV5MDmXSCZlMBrkzv5lNJgd7ohMyUK75zWyAUhZ/MdwDt2cYx7D0KjkOzy3H8QKlFkO1GphSjmNYPl1eTF4QkcuLyfmwmDiewVFxPIfH5fk5uRrkyrdvVzudaBxwk11Acsr8c6qn+XYbYKuJS73+hvE18h68+rW0AEuIpDCEiXoC5+XQxYQdC4jdF7R2bc7g9WvTxtYYmhmWYoe4tJRLEXcul4RToLlyvJ0Pmchys6iXCCKkxkgKqjurZG7ay4i+JoV3RaYwYtqJctH5fIde9vXVeQNMtE0IPG6HUpoexykxOITzg5QGLJaPp4oeKvYdqYcslJIjEUP45miArao0sLNPw+EzwBr2XQoM7FuPAZtlc4ABv7aOQjV8+0QwBnfu+TC/32mQXK00mkBjdEj27bHpfm2HRbUZsCP61srj6XFTofmYmC/oL+SA22sfBVFRwgxGJldHdKgDW51kn99fLa7enfmMSIkvii/g/jz7CIRShVqjN3Ys8xBtXNzDcXu3m0JqD8Vm597DspS61btb7RpIKHPjikVmaFupPbWU3zvdfrvVs9ttheJWAu9Sfa8mX+YN6Ni2FyKKKuraKDRRchZ4Lck4Mda3o1pSg1YWRCiM1GPIw1yYfAxPTFD5pHssOrou7tZPaclnEtkr33qoVyRlvDgqeQlZ0SukCTFjoBgyIsawUoihpvsUeUSlN7+wX2tXheknLO9fHI/v3lMsb3R9m2SnKZb1qI7b3XCqEeGXj3OQOrAXWsI3hUGiBxLjqybACmNnIyECmNCGcI4UpAKFqXxEWvO/bjrcvO+4T8Cd8u26E1lbu/cWZDVobEWmuRVZIUHZyYholPDlSW/PeKGLdy1yH4+vEl9JtcOrKt7HZKsg++aQY1K1BxAfH2cRiuN6V0F6x46yxo5yZqjpKszew4Nm3w86mR1EZsG8JHmaElW8ALDc6MPMFT9mWL7+7dN2f22en7QphjRXC7B3RLOLdPmIZHtGO7yf/qjpBOBr92HLlkpsE/H1bN1jyJorPi7XX2wse8koU8d27I2GRKZ
ooTEyspva7ZtcyI842wDcKRkrkoKRoHIBxACXMeuZZ2xCLlq5elTFqztM7gFe+w4TfBHwiYn8eQym9RgrxkjqMtt7MmLrzFRbKMN/Y2SGGbh0cHuGoaIk1evnm0xDRpR74HYaYiGrBx55GfFMMdvFyl0b87O/kmF3Ne+LwkGRgCb/t0sbdpbcmp4JgzFuVslA+r7yy4jWHuf6jsr7Y1sD7g5vE7VqNZwKaaoBsrrCjEa+GhxJzzkBjhXJ2ZbZFjaAO6k1C3n74RssdUKofFo0evRgnnacdpOxLUxRPl92GO5XDmfjtbYLyjQJOdLluAd1KeSa2XKUxU6JiFHJXLt5XBRSoPstBZcxMHHmxuw+xEgVmWmDPiUouiFzsbG2n6OJzt1lChox1T2gRtZZYo8/KByHO/OUiFvRb02NRJtFlFiH+ktna5wrXwcF+979+qPo9Jja0SeinQFQGRCM/hcAAP//VUidAg==" } diff --git a/x-pack/auditbeat/module/system/process/process.go b/x-pack/auditbeat/module/system/process/process.go index d2dfae065980..08a72fe562e5 100644 --- a/x-pack/auditbeat/module/system/process/process.go +++ b/x-pack/auditbeat/module/system/process/process.go @@ -18,6 +18,7 @@ import ( "github.com/elastic/beats/v7/auditbeat/datastore" "github.com/elastic/beats/v7/auditbeat/helper/hasher" + "github.com/elastic/beats/v7/libbeat/common/capabilities" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/x-pack/auditbeat/cache" @@ -101,12 +102,14 @@ type MetricSet struct { // Process represents information about a process. type Process struct { - Info types.ProcessInfo - UserInfo *types.UserInfo - User *user.User - Group *user.Group - Hashes map[hasher.HashType]hasher.Digest - Error error + Info types.ProcessInfo + UserInfo *types.UserInfo + User *user.User + Group *user.Group + CapEffective []string + CapPermitted []string + Hashes map[hasher.HashType]hasher.Digest + Error error } // Hash creates a hash for Process. 
@@ -376,6 +379,13 @@ func (ms *MetricSet) processEvent(process *Process, eventType string, action eve event.RootFields.Put("user.group.name", process.Group.Name) } + if len(process.CapEffective) > 0 { + event.RootFields.Put("process.thread.capabilities.effective", process.CapEffective) + } + if len(process.CapPermitted) > 0 { + event.RootFields.Put("process.thread.capabilities.permitted", process.CapPermitted) + } + if process.Hashes != nil { for hashType, digest := range process.Hashes { fieldName := "process.hash." + string(hashType) @@ -489,8 +499,20 @@ func (ms *MetricSet) getProcesses() ([]*Process, error) { } // Exclude Linux kernel processes, they are not very interesting. - if runtime.GOOS == "linux" && userInfo.UID == "0" && process.Info.Exe == "" { - continue + if runtime.GOOS == "linux" { + if userInfo.UID == "0" && process.Info.Exe == "" { + continue + } + + // Fetch Effective and Permitted capabilities + process.CapEffective, err = capabilities.FromPid(capabilities.Effective, pInfo.PID) + if err != nil && process.Error == nil { + process.Error = err + } + process.CapPermitted, err = capabilities.FromPid(capabilities.Permitted, pInfo.PID) + if err != nil && process.Error == nil { + process.Error = err + } } processes = append(processes, process) From 31819ecf6ecf3ff515e4bd56f65ccb4cf2f73fae Mon Sep 17 00:00:00 2001 From: Olga Naydyonock Date: Tue, 6 Feb 2024 13:41:30 +0200 Subject: [PATCH 118/129] Auditbeat pipeline migration to Buildkite (#37668) * wip: getting GH message * added test scripts * added windows tests * added packaging step * updated packaging execution conditions * win-test failure: updated artifact path * pr fixes * changed group_test.go * moved env setup to separate script * added dynamic step for packaging * added tests and scrosscompile scripts for auditbeat * fixes for crosscompile and macos steps * added env vars for linux packaging * added env vars for linux packaging * enabled packaging step * added docker login * debugging arm 
packaing * enabled synamic packaging step * updated packaging group steps * fixed conflicts * updated win test script --- .buildkite/auditbeat/auditbeat-pipeline.yml | 136 +++++++++++++++++- .buildkite/auditbeat/scripts/crosscompile.sh | 8 ++ .buildkite/auditbeat/scripts/package-step.sh | 46 ++++++ .buildkite/auditbeat/scripts/package.sh | 13 ++ .../auditbeat/scripts/unit-tests-win.ps1 | 51 +++++++ .buildkite/auditbeat/scripts/unit-tests.sh | 12 ++ .buildkite/env-scripts/env.sh | 6 + .buildkite/env-scripts/linux-env.sh | 41 ++++-- .buildkite/env-scripts/macos-env.sh | 8 -- .buildkite/env-scripts/util.sh | 14 ++ .buildkite/env-scripts/win-env.sh | 12 +- .buildkite/filebeat/filebeat-pipeline.yml | 2 - .../filebeat/scripts/integration-gotests.sh | 2 +- .../filebeat/scripts/integration-pytests.sh | 2 +- .buildkite/filebeat/scripts/package-step.sh | 12 +- .buildkite/filebeat/scripts/package.sh | 4 +- .buildkite/filebeat/scripts/unit-tests.sh | 3 +- .buildkite/hooks/post-checkout | 2 +- .buildkite/hooks/pre-command | 6 +- .buildkite/pull-requests.json | 4 +- 20 files changed, 336 insertions(+), 48 deletions(-) create mode 100755 .buildkite/auditbeat/scripts/crosscompile.sh create mode 100755 .buildkite/auditbeat/scripts/package-step.sh create mode 100755 .buildkite/auditbeat/scripts/package.sh create mode 100644 .buildkite/auditbeat/scripts/unit-tests-win.ps1 create mode 100755 .buildkite/auditbeat/scripts/unit-tests.sh delete mode 100644 .buildkite/env-scripts/macos-env.sh diff --git a/.buildkite/auditbeat/auditbeat-pipeline.yml b/.buildkite/auditbeat/auditbeat-pipeline.yml index 34321b61161b..147ca45ced16 100644 --- a/.buildkite/auditbeat/auditbeat-pipeline.yml +++ b/.buildkite/auditbeat/auditbeat-pipeline.yml @@ -1,5 +1,137 @@ # yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json +env: + IMAGE_UBUNTU_X86_64: "family/core-ubuntu-2204" + IMAGE_UBUNTU_ARM_64: "core-ubuntu-2004-aarch64" + IMAGE_WIN_2016: 
"family/core-windows-2016" + IMAGE_WIN_2019: "family/core-windows-2019" + IMAGE_WIN_2022: "family/core-windows-2022" + IMAGE_RHEL9: "family/core-rhel-9" + IMAGE_MACOS_X86_64: "generic-13-ventura-x64" + steps: - - label: "Example test" - command: echo "Hello!" + - group: "Auditbeat Mandatory Testing" + key: "mandatory-tests" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") == "auditbeat" || build.env("BUILDKITE_PULL_REQUEST") != "false" + + steps: + - label: ":ubuntu: Unit Tests" + command: + - ".buildkite/auditbeat/scripts/unit-tests.sh" + notify: + - github_commit_status: + context: "auditbeat: Unit Tests" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + artifact_paths: + - "auditbeat/build/*.xml" + - "auditbeat/build/*.json" + + - label: ":rhel: Unit Tests" + command: + - ".buildkite/auditbeat/scripts/unit-tests.sh" + notify: + - github_commit_status: + context: "auditbeat: Unit Tests" + agents: + provider: "gcp" + image: "${IMAGE_RHEL9}" + artifact_paths: + - "auditbeat/build/*.xml" + - "auditbeat/build/*.json" + + - label: ":windows:-{{matrix.image}} Unit Tests" + command: ".buildkite/auditbeat/scripts/unit-tests-win.ps1" + notify: + - github_commit_status: + context: "auditbeat: Unit Tests" + agents: + provider: "gcp" + image: "{{matrix.image}}" + machine_type: "n2-standard-8" + disk_size: 200 + disk_type: "pd-ssd" + matrix: + setup: + image: + - "${IMAGE_WIN_2016}" + - "${IMAGE_WIN_2022}" + artifact_paths: + - "auditbeat/build/*.xml" + - "auditbeat/build/*.json" + + - label: ":linux: Crosscompile" + command: + - ".buildkite/auditbeat/scripts/crosscompile.sh" + env: + GOX_FLAGS: "-arch amd64" + notify: + - github_commit_status: + context: "auditbeat: Unit Tests" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + + - group: "Extended Testing" + key: "extended-tests" + if: build.env("BUILDKITE_PULL_REQUEST") != "false" || build.env("GITHUB_PR_TRIGGER_COMMENT") == "auditbeat for extended support" + + steps: + - label: ":linux: ARM64 Unit 
Tests" + key: "arm-extended" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") == "auditbeat for arm" || build.env("GITHUB_PR_LABELS") =~ /.*arm.*/ + command: + - ".buildkite/auditbeat/scripts/unit-tests.sh" + notify: + - github_commit_status: + context: "auditbeat/Extended: Unit Tests ARM" + agents: + provider: "aws" + imagePrefix: "${IMAGE_UBUNTU_ARM_64}" + instanceType: "t4g.large" + artifact_paths: "auditbeat/build/*.xml" + + - label: ":mac: MacOS Unit Tests" + key: "macos-extended" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") == "auditbeat for macos" || build.env("GITHUB_PR_LABELS") =~ /.*macOS.*/ + command: + - ".buildkite/auditbeat/scripts/unit-tests.sh" + notify: + - github_commit_status: + context: "auditbeat/Extended: MacOS Unit Tests" + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: "auditbeat/build/*.xml" + + - group: "Windows Extended Testing" + key: "extended-tests-win" + if: build.env("GITHUB_PR_TRIGGER_COMMENT") == "auditbeat for windows" || build.env("GITHUB_PR_LABELS") =~ /.*windows.*/ + + steps: + - label: ":windows: Win 2019 Unit Tests" + key: "win-extended-2019" + command: ".buildkite/auditbeat/scripts/unit-tests-win.ps1" + notify: + - github_commit_status: + context: "auditbeat/Extended: Win-2019 Unit Tests" + agents: + provider: "gcp" + image: "${IMAGE_WIN_2019}" + machine_type: "n2-standard-8" + disk_size: 200 + disk_type: "pd-ssd" + artifact_paths: + - "auditbeat/build/*.xml" + - "auditbeat/build/*.json" + + - group: "Packaging" + key: "packaging" + if: build.env("BUILDKITE_PULL_REQUEST") != "false" + depends_on: + - "mandatory-tests" + + steps: + - label: Package pipeline + commands: ".buildkite/auditbeat/scripts/package-step.sh | buildkite-agent pipeline upload" diff --git a/.buildkite/auditbeat/scripts/crosscompile.sh b/.buildkite/auditbeat/scripts/crosscompile.sh new file mode 100755 index 000000000000..866d6be42239 --- /dev/null +++ b/.buildkite/auditbeat/scripts/crosscompile.sh @@ -0,0 +1,8 @@ 
+#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/linux-env.sh + +echo "--- Executing Crosscompile" +make -C auditbeat crosscompile diff --git a/.buildkite/auditbeat/scripts/package-step.sh b/.buildkite/auditbeat/scripts/package-step.sh new file mode 100755 index 000000000000..021240589923 --- /dev/null +++ b/.buildkite/auditbeat/scripts/package-step.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/util.sh + +changeset="^auditbeat/ +^go.mod +^pytest.ini +^dev-tools/ +^libbeat/ +^testing/ +^\.buildkite/auditbeat/" + +if are_files_changed "$changeset"; then + cat <<-EOF + steps: + - label: ":ubuntu: Packaging Linux X86" + key: "package-linux-x86" + env: + PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" + command: + - ".buildkite/auditbeat/scripts/package.sh" + notify: + - github_commit_status: + context: "Auditbeat/Packaging: Linux X86" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + + - label: ":linux: Packaging Linux ARM" + key: "package-linux-arm" + env: + PLATFORMS: "linux/arm64" + PACKAGES: "docker" + command: + - ".buildkite/auditbeat/scripts/package.sh" + notify: + - github_commit_status: + context: "Auditbeat/Packaging: ARM" + agents: + provider: "aws" + imagePrefix: "${IMAGE_UBUNTU_ARM_64}" + instanceType: "t4g.large" +EOF +fi diff --git a/.buildkite/auditbeat/scripts/package.sh b/.buildkite/auditbeat/scripts/package.sh new file mode 100755 index 000000000000..71872ca15a35 --- /dev/null +++ b/.buildkite/auditbeat/scripts/package.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/linux-env.sh + +echo "--- Docker Version: $(docker --version)" + +echo "--- Start Packaging" +cd auditbeat +umask 0022 +mage package + diff --git a/.buildkite/auditbeat/scripts/unit-tests-win.ps1 b/.buildkite/auditbeat/scripts/unit-tests-win.ps1 new file mode 100644 index 000000000000..200627d518f0 --- /dev/null +++ 
b/.buildkite/auditbeat/scripts/unit-tests-win.ps1 @@ -0,0 +1,51 @@ +$ErrorActionPreference = "Stop" # set -e +$GoVersion = $env:GOLANG_VERSION # If Choco doesn't have the version specified in .go-version file, should be changed manually + +# Forcing to checkout again all the files with a correct autocrlf. +# Doing this here because we cannot set git clone options before. +function fixCRLF() { + Write-Host "--- Fixing CRLF in git checkout --" + git config core.autocrlf false + git rm --quiet --cached -r . + git reset --quiet --hard +} + +function withGolang() { + Write-Host "--- Install golang $GoVersion --" + choco install golang -y --version $GoVersion + + $choco = Convert-Path "$((Get-Command choco).Path)\..\.." + Import-Module "$choco\helpers\chocolateyProfile.psm1" + refreshenv + go version + go env +} + +function installGoDependencies() { + $installPackages = @( + "github.com/magefile/mage" + "github.com/elastic/go-licenser" + "golang.org/x/tools/cmd/goimports" + "github.com/jstemmer/go-junit-report" + "github.com/tebeka/go2xunit" + ) + foreach ($pkg in $installPackages) { + go install "$pkg" + } +} + +fixCRLF + +$ErrorActionPreference = "Continue" # set +e + +Set-Location -Path auditbeat +New-Item -ItemType Directory -Force -Path "build" +withGolang +installGoDependencies + +mage build unitTest + +$EXITCODE=$LASTEXITCODE +$ErrorActionPreference = "Stop" + +Exit $EXITCODE diff --git a/.buildkite/auditbeat/scripts/unit-tests.sh b/.buildkite/auditbeat/scripts/unit-tests.sh new file mode 100755 index 000000000000..c1f5685c77fe --- /dev/null +++ b/.buildkite/auditbeat/scripts/unit-tests.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -euo pipefail + +source .buildkite/env-scripts/linux-env.sh + +echo "--- Running Unit Tests" +sudo chmod -R go-w auditbeat/ + +cd auditbeat +umask 0022 +mage build unitTest diff --git a/.buildkite/env-scripts/env.sh b/.buildkite/env-scripts/env.sh index d94d03aad53b..4dfc01bafc34 100644 --- a/.buildkite/env-scripts/env.sh +++ 
b/.buildkite/env-scripts/env.sh @@ -5,9 +5,15 @@ WORKSPACE="$(pwd)" BIN="${WORKSPACE}/bin" HW_TYPE="$(uname -m)" PLATFORM_TYPE="$(uname)" +REPO="beats" +TMP_FOLDER="tmp.${REPO}" +DOCKER_REGISTRY="docker.elastic.co" export SETUP_GVM_VERSION export WORKSPACE export BIN export HW_TYPE export PLATFORM_TYPE +export REPO +export TMP_FOLDER +export DOCKER_REGISTRY diff --git a/.buildkite/env-scripts/linux-env.sh b/.buildkite/env-scripts/linux-env.sh index edaf1a3100c2..5e6e5f7cbf05 100644 --- a/.buildkite/env-scripts/linux-env.sh +++ b/.buildkite/env-scripts/linux-env.sh @@ -1,24 +1,47 @@ #!/usr/bin/env bash +set -euo pipefail + source .buildkite/env-scripts/util.sh DEBIAN_FRONTEND="noninteractive" -export DEBIAN_FRONTEND - sudo mkdir -p /etc/needrestart echo "\$nrconf{restart} = 'a';" | sudo tee -a /etc/needrestart/needrestart.conf > /dev/null -# Remove this code once beats specific agent is set up +echo "--- PLATFORM TYPE $PLATFORM_TYPE" + if [[ $PLATFORM_TYPE == "Linux" ]]; then - echo ":: Installing libs ::" - sudo apt-get update - sudo apt-get install -y libsystemd-dev - sudo apt install -y python3-pip - sudo apt-get install -y python3-venv + # Remove this code once beats specific agent is set up + if grep -q 'Ubuntu' /etc/*release; then + export DEBIAN_FRONTEND + + echo "--- Ubuntu - Installing libs" + sudo apt-get update + sudo apt-get install -y libsystemd-dev + sudo apt install -y python3-pip + sudo apt-get install -y python3-venv + fi + + # Remove this code once beats specific agent is set up + if grep -q 'Red Hat' /etc/*release; then + echo "--- RHL - Installing libs" + sudo yum update -y + sudo yum install -y systemd-devel + sudo yum install -y python3-pip + sudo yum install -y python3 + pip3 install virtualenv + fi +fi + +if [[ $PLATFORM_TYPE == Darwin* ]]; then + echo "--- Setting larger ulimit on MacOS" + # To bypass file descriptor errors like "Too many open files error" on MacOS + ulimit -Sn 50000 + echo "--- ULIMIT: $(ulimit -n)" fi -echo ":: Setting up 
environment ::" +echo "--- Setting up environment" add_bin_path with_go with_mage diff --git a/.buildkite/env-scripts/macos-env.sh b/.buildkite/env-scripts/macos-env.sh deleted file mode 100644 index ac1486b64fdd..000000000000 --- a/.buildkite/env-scripts/macos-env.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash - -if [[ $PLATFORM_TYPE == Darwin* ]]; then - echo ":: Setting larger ulimit on MacOS ::" - # To bypass file descriptor errors like "Too many open files error" on MacOS - ulimit -Sn 50000 - echo ":: ULIMIT :: $(ulimit -n)" -fi diff --git a/.buildkite/env-scripts/util.sh b/.buildkite/env-scripts/util.sh index 157a5aff37af..7aef69cff389 100644 --- a/.buildkite/env-scripts/util.sh +++ b/.buildkite/env-scripts/util.sh @@ -89,3 +89,17 @@ are_files_changed() { return 1; fi } + +cleanup() { + echo "Deleting temporary files..." + rm -rf ${BIN}/${TMP_FOLDER}.* + echo "Done." +} + +unset_secrets () { + for var in $(printenv | sed 's;=.*;;' | sort); do + if [[ "$var" == *_SECRET || "$var" == *_TOKEN ]]; then + unset "$var" + fi + done +} diff --git a/.buildkite/env-scripts/win-env.sh b/.buildkite/env-scripts/win-env.sh index aa5f67ca4cee..ccf5479b46e1 100644 --- a/.buildkite/env-scripts/win-env.sh +++ b/.buildkite/env-scripts/win-env.sh @@ -1,8 +1,8 @@ #!/usr/bin/env bash -install_python_win() { - if [[ ${PLATFORM_TYPE} = MINGW* ]]; then - choco install mingw -y - choco install python --version=3.11.0 -y - fi -} +echo "--- PLATFORM TYPE: ${PLATFORM_TYPE}" +if [[ ${PLATFORM_TYPE} = MINGW* ]]; then + echo "--- Installing Python on Win" + choco install mingw -y + choco install python --version=3.11.0 -y +fi diff --git a/.buildkite/filebeat/filebeat-pipeline.yml b/.buildkite/filebeat/filebeat-pipeline.yml index e3d7384a71ea..eda9fb93a669 100644 --- a/.buildkite/filebeat/filebeat-pipeline.yml +++ b/.buildkite/filebeat/filebeat-pipeline.yml @@ -134,8 +134,6 @@ steps: if: build.env("BUILDKITE_PULL_REQUEST") != "false" depends_on: - "mandatory-tests" - - 
"extended-tests" - - "extended-tests-win" steps: - label: Package pipeline diff --git a/.buildkite/filebeat/scripts/integration-gotests.sh b/.buildkite/filebeat/scripts/integration-gotests.sh index a3eabf70c0d3..d64ce7c98eb2 100755 --- a/.buildkite/filebeat/scripts/integration-gotests.sh +++ b/.buildkite/filebeat/scripts/integration-gotests.sh @@ -4,7 +4,7 @@ set -euo pipefail source .buildkite/env-scripts/linux-env.sh -echo ":: Execute Integration Tests ::" +echo "--- Executing Integration Tests" sudo chmod -R go-w filebeat/ cd filebeat diff --git a/.buildkite/filebeat/scripts/integration-pytests.sh b/.buildkite/filebeat/scripts/integration-pytests.sh index 5e2e403dda87..b51e8ae18a68 100755 --- a/.buildkite/filebeat/scripts/integration-pytests.sh +++ b/.buildkite/filebeat/scripts/integration-pytests.sh @@ -4,7 +4,7 @@ set -euo pipefail source .buildkite/env-scripts/linux-env.sh -echo ":: Execute Integration Tests ::" +echo "--- Executing Integration Tests" sudo chmod -R go-w filebeat/ cd filebeat diff --git a/.buildkite/filebeat/scripts/package-step.sh b/.buildkite/filebeat/scripts/package-step.sh index a4127c3cd1d6..985125433cec 100755 --- a/.buildkite/filebeat/scripts/package-step.sh +++ b/.buildkite/filebeat/scripts/package-step.sh @@ -5,12 +5,12 @@ set -euo pipefail source .buildkite/env-scripts/util.sh changeset="^filebeat/ - ^go.mod - ^pytest.ini - ^dev-tools/ - ^libbeat/ - ^testing/ - ^\.buildkite/filebeat/" +^go.mod +^pytest.ini +^dev-tools/ +^libbeat/ +^testing/ +^\.buildkite/filebeat/" if are_files_changed "$changeset"; then cat <<-EOF diff --git a/.buildkite/filebeat/scripts/package.sh b/.buildkite/filebeat/scripts/package.sh index 2ae226eb739c..0bb03250348c 100755 --- a/.buildkite/filebeat/scripts/package.sh +++ b/.buildkite/filebeat/scripts/package.sh @@ -4,9 +4,7 @@ set -euo pipefail source .buildkite/env-scripts/linux-env.sh -echo ":: Evaluate Filebeat Changes ::" - -echo ":: Start Packaging ::" +echo "--- Start Packaging" cd filebeat umask 0022 
mage package diff --git a/.buildkite/filebeat/scripts/unit-tests.sh b/.buildkite/filebeat/scripts/unit-tests.sh index cda1dd85aea2..08ce9d4ea1c6 100755 --- a/.buildkite/filebeat/scripts/unit-tests.sh +++ b/.buildkite/filebeat/scripts/unit-tests.sh @@ -3,9 +3,8 @@ set -euo pipefail source .buildkite/env-scripts/linux-env.sh -source .buildkite/env-scripts/macos-env.sh -echo ":: Execute Unit Tests ::" +echo "--- Executing Unit Tests" sudo chmod -R go-w filebeat/ umask 0022 diff --git a/.buildkite/hooks/post-checkout b/.buildkite/hooks/post-checkout index e10f15de7b65..b6cc7ad60bda 100644 --- a/.buildkite/hooks/post-checkout +++ b/.buildkite/hooks/post-checkout @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -euo pipefail diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index ef38478a4327..c448a7102512 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -2,15 +2,11 @@ set -euo pipefail -if [[ "$BUILDKITE_PIPELINE_SLUG" == "filebeat" ]]; then +if [[ "$BUILDKITE_PIPELINE_SLUG" == "filebeat" || "$BUILDKITE_PIPELINE_SLUG" == "auditbeat" ]]; then source .buildkite/env-scripts/env.sh source .buildkite/env-scripts/util.sh source .buildkite/env-scripts/win-env.sh - if [[ ${PLATFORM_TYPE} = MINGW* ]]; then - install_python_win - fi - if [[ -z "${GOLANG_VERSION-""}" ]]; then export GOLANG_VERSION=$(cat "${WORKSPACE}/.go-version") fi diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index 4607a0576d0b..abe0d20f6805 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -57,8 +57,8 @@ "set_commit_status": true, "build_on_commit": true, "build_on_comment": true, - "trigger_comment_regex": "^/test auditbeat$", - "always_trigger_comment_regex": "^/test auditbeat$", + "trigger_comment_regex": "^/test auditbeat(for (arm|macos|windows|extended support))?$", + "always_trigger_comment_regex": "^/test auditbeat(for (arm|macos|windows|extended support))?$", "skip_ci_labels": [ ], 
"skip_target_branches": [ ], "skip_ci_on_only_changed": [ ], From a21051d44047c0f746f55f1d36862366015a67c0 Mon Sep 17 00:00:00 2001 From: sharbuz <87968844+sharbuz@users.noreply.github.com> Date: Tue, 6 Feb 2024 16:29:01 +0200 Subject: [PATCH 119/129] Fix libbeat BK crosscompile step (#37870) --- libbeat/scripts/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libbeat/scripts/Makefile b/libbeat/scripts/Makefile index 100ccd3f0137..7ad73e85907d 100755 --- a/libbeat/scripts/Makefile +++ b/libbeat/scripts/Makefile @@ -87,7 +87,7 @@ SYSTEM_TESTS?=false ## @testing if true, "make test" and "make testsuite" run un STRESS_TESTS?=false ## @testing if true, "make test" and "make testsuite" run also run the stress tests STRESS_TEST_OPTIONS?=-timeout=20m -race -v GOX_OS?=linux darwin windows freebsd netbsd openbsd ## @Building List of all OS to be supported by "make crosscompile". -GOX_OSARCH?=!darwin/arm !darwin/arm64 ## @building Space separated list of GOOS/GOARCH pairs to build by "make crosscompile". +GOX_OSARCH?=!darwin/arm !darwin/386 !linux/386 !windows/386 !freebsd/386 !netbsd/386 !openbsd/386 !linux/ppc64 ## @building Space-separated list of GOOS/GOARCH pairs to exclude (unsupported by GO and generated by GOX) in the "make crosscompile" build. GOX_FLAGS?= ## @building Additional flags to append to the gox command used by "make crosscompile". # XXX: Should be switched back to `snapshot` once the Elasticsearch # snapshots are working. 
https://github.com/elastic/beats/pull/6416 From e12d5b51d5a76388d8fbd60bffd26b0f5dbaf819 Mon Sep 17 00:00:00 2001 From: sharbuz <87968844+sharbuz@users.noreply.github.com> Date: Tue, 6 Feb 2024 16:31:00 +0200 Subject: [PATCH 120/129] temporary disable the failed windows test (#37880) * temporary disable the failed windows test * Update metricbeat/tests/system/test_reload.py Co-authored-by: Victor Martinez --------- Co-authored-by: Victor Martinez --- metricbeat/tests/system/test_reload.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/metricbeat/tests/system/test_reload.py b/metricbeat/tests/system/test_reload.py index 29d82bbf82b2..99aa8e2c2f27 100644 --- a/metricbeat/tests/system/test_reload.py +++ b/metricbeat/tests/system/test_reload.py @@ -42,7 +42,8 @@ def test_reload(self): self.wait_until(lambda: self.output_lines() > 0) proc.check_kill_and_wait() - @unittest.skipUnless(re.match("(?i)win|linux|darwin|freebsd|openbsd", sys.platform), "os") + # windows is disabled, see https://github.com/elastic/beats/issues/37841 + @unittest.skipUnless(re.match("(?i)linux|darwin|freebsd|openbsd", sys.platform), "os") def test_start_stop(self): """ Test if module is properly started and stopped From 3912c08debec1fba0416d035ed37815aa695621a Mon Sep 17 00:00:00 2001 From: sharbuz <87968844+sharbuz@users.noreply.github.com> Date: Tue, 6 Feb 2024 16:31:53 +0200 Subject: [PATCH 121/129] fix libbeat stress-tests (#37873) --- libbeat/scripts/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libbeat/scripts/Makefile b/libbeat/scripts/Makefile index 7ad73e85907d..4360aa0c1927 100755 --- a/libbeat/scripts/Makefile +++ b/libbeat/scripts/Makefile @@ -46,7 +46,7 @@ export PATH := ./bin:$(PATH) GOFILES = $(shell find . -type f -name '*.go' 2>/dev/null) GOFILES_NOVENDOR = $(shell find . 
-type f -name '*.go' -not -path "*/vendor/*" 2>/dev/null) GOFILES_ALL = $(GOFILES) $(shell find $(ES_BEATS) -type f -name '*.go' 2>/dev/null) -GOPACKAGES_STRESSTESTS=$(shell find . -name '*.go' 2>/dev/null | xargs grep -l '\+build.*stresstest' | xargs -n1 dirname | uniq) +GOPACKAGES_STRESSTESTS=$(shell find . -type d \( -name "stress" \) 2>/dev/null) SHELL=bash ES_HOST?=elasticsearch ES_PORT?=9200 From df9694e1b62530ca2790878828abcedcd264da84 Mon Sep 17 00:00:00 2001 From: sharbuz <87968844+sharbuz@users.noreply.github.com> Date: Tue, 6 Feb 2024 16:51:52 +0200 Subject: [PATCH 122/129] winlogbeat BK pipeline init (#37859) * winlogbeat BK pipeline init * Update catalog-info.yaml Co-authored-by: Victor Martinez --------- Co-authored-by: Victor Martinez --- .buildkite/pull-requests.json | 16 ++++++ .buildkite/winlogbeat/pipeline.winlogbeat.yml | 5 ++ catalog-info.yaml | 49 ++++++++++++++++++- 3 files changed, 68 insertions(+), 2 deletions(-) create mode 100644 .buildkite/winlogbeat/pipeline.winlogbeat.yml diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index abe0d20f6805..66c508e252c9 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -143,6 +143,22 @@ "skip_target_branches": [ ], "skip_ci_on_only_changed": ["^x-pack/elastic-agent/README.md", "^x-pack/elastic-agent/docs/.*", "^x-pack/elastic-agent/devtools/.*" ], "always_require_ci_on_changed": ["^x-pack/elastic-agent/.*", ".buildkite/x-pack/elastic-agent/.*", "^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", "^testing/.*"] + }, + { + "enabled": true, + "pipelineSlug": "beats-winlogbeat", + "allow_org_users": true, + "allowed_repo_permissions": ["admin", "write"], + "allowed_list": [ ], + "set_commit_status": true, + "build_on_commit": true, + "build_on_comment": true, + "trigger_comment_regex": "^/test winlogbeat$", + "always_trigger_comment_regex": "^/test winlogbeat$", + "skip_ci_labels": [ ], + "skip_target_branches": [ ], + 
"skip_ci_on_only_changed": [ ], + "always_require_ci_on_changed": ["^winlogbeat/.*", ".buildkite/winlogbeat/.*", "^go.mod", "^pytest.ini", "^dev-tools/.*", "^libbeat/.*", "^testing/.*"] } ] } diff --git a/.buildkite/winlogbeat/pipeline.winlogbeat.yml b/.buildkite/winlogbeat/pipeline.winlogbeat.yml new file mode 100644 index 000000000000..34321b61161b --- /dev/null +++ b/.buildkite/winlogbeat/pipeline.winlogbeat.yml @@ -0,0 +1,5 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json + +steps: + - label: "Example test" + command: echo "Hello!" diff --git a/catalog-info.yaml b/catalog-info.yaml index f3dd3094788e..037d92e415c8 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -413,9 +413,54 @@ spec: cancel_intermediate_builds: true cancel_intermediate_builds_branch_filter: "!main !7.17 !8.*" skip_intermediate_builds: true - skip_intermediate_builds_branch_filter: "!main !7.17 !8.*" + skip_intermediate_builds_branch_filter: "!main !7.17 !8.*" + teams: + ingest-fp: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: READ_ONLY + +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-beats-winlogbeat + description: "Beats winlogbeat pipeline" + links: + - title: Pipeline + url: https://buildkite.com/elastic/beats-winlogbeat + +spec: + type: buildkite-pipeline + owner: group:ingest-fp + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: beats-winlogbeat + description: "Beats winlogbeat pipeline" + spec: +# branch_configuration: "main 7.17 8.*" TODO: temporarily commented to build PRs from forks + pipeline_file: ".buildkite/winlogbeat/pipeline.winlogbeat.yml" +# maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline 
is ready + provider_settings: + build_pull_request_forks: false + build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot + build_tags: true + filter_enabled: true + filter_condition: >- + build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null) + repository: elastic/beats + cancel_intermediate_builds: true + cancel_intermediate_builds_branch_filter: "!main !7.17 !8.*" + skip_intermediate_builds: true + skip_intermediate_builds_branch_filter: "!main !7.17 !8.*" + # env: + # ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is ready teams: ingest-fp: access_level: MANAGE_BUILD_AND_READ everyone: - access_level: READ_ONLY \ No newline at end of file + access_level: READ_ONLY From 4e255e6fcc578d8628ff9d415a7a30504f5c13f1 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 6 Feb 2024 12:38:06 -0500 Subject: [PATCH 123/129] chore: Update snapshot.yml (#37891) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index c9e2ce4a83e9..992e38dd763d 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-2eea2ca0-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-f56d9bd5-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: 
docker.elastic.co/logstash/logstash:8.13.0-2eea2ca0-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-f56d9bd5-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-2eea2ca0-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-f56d9bd5-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 33d13f5f10eb5e7acc1e92640010a070352d9e50 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 7 Feb 2024 08:39:13 +0100 Subject: [PATCH 124/129] docs: Prepare Changelog for 8.12.1 (#37895) (#37897) * docs: Close changelog for 8.12.1 * Remove empty sections. * Remove old entries. * Fix whitespace --------- Co-authored-by: Craig MacKenzie (cherry picked from commit c60864592ddfb57152c061b1727d8b8223f1e6db) Co-authored-by: Elastic Machine --- CHANGELOG.asciidoc | 51 +++++++++++++++++++++++++++++++++++ CHANGELOG.next.asciidoc | 25 ++++++----------- libbeat/docs/release.asciidoc | 1 + 3 files changed, 60 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index eef386a8a6ed..cb5aa3b6354b 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -3,6 +3,57 @@ :issue: https://github.com/elastic/beats/issues/ :pull: https://github.com/elastic/beats/pull/ +[[release-notes-8.12.1]] +=== Beats version 8.12.1 +https://github.com/elastic/beats/compare/v8.12.0\...v8.12.1[View commits] + +==== Known Issues + +*Affecting all Beats* + +Performance regression in AWS S3 inputs using SQS notification. + +In 8.12 the default memory queue flush interval was raised from 1 second to 10 seconds. In many configurations this improves performance because it allows the output to batch more events per round trip, which improves efficiency. 
However, the SQS input has an extra bottleneck that interacts badly with the new value. For more details see {issue}37754[37754]. + +If you are using the Elasticsearch output, and your output configuration uses a performance preset, switch it to `preset: latency`. If you use no preset or use `preset: custom`, then set `queue.mem.flush.timeout: 1s` in your queue or output configuration. + +==== Breaking changes + +*Affecting all Beats* + +- add_cloud_metadata processor: `huawei` provider is now treated as `openstack`. Huawei cloud runs on OpenStack +platform, and when viewed from a metadata API standpoint, it is impossible to differentiate it from OpenStack. If you +know that your deployments run on Huawei Cloud exclusively, and you wish to have `cloud.provider` value as `huawei`, +you can achieve this by overwriting the value using an `add_fields` processor. {pull}35184[35184] + +==== Bugfixes + +*Affecting all Beats* + +- aws: Add credential caching for `AssumeRole` session tokens. {issue}37787[37787] +- Lower logging level to debug when attempting to configure beats with unknown fields from autodiscovered events/environments. {pull}[37816][37816] + +*Filebeat* + +- Fix nil pointer dereference in the httpjson input. {pull}37591[37591] +- Fix TCP/UDP metric queue length parsing base. {pull}37714[37714] +- Fix m365_defender cursor value and query building. {pull}37116[37116] +- Update github.com/lestrrat-go/jwx dependency. {pull}37799[37799] + +*Heartbeat* + +- Fix setuid root when running under cgroups v2. {pull}37794[37794] + +*Metricbeat* + +- Fix Azure Resource Metrics missing metrics (min and max aggregations) after upgrade to 8.11.3. {issue}37642[37642] {pull}37643[37643] + +==== Added + +*Filebeat* + +- Relax TCP/UDP metric polling expectations to improve metric collection. 
{pull}37714[37714] + [[release-notes-8.12.0]] === Beats version 8.12.0 https://github.com/elastic/beats/compare/v8.11.4\...v8.12.0[View commits] diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index f93cde9590a2..645409067f14 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -67,6 +67,7 @@ fields added to events containing the Beats version. {pull}37553[37553] - Lower logging level to debug when attempting to configure beats with unknown fields from autodiscovered events/environments {pull}[37816][37816] - Set timeout of 1 minute for FQDN requests {pull}37756[37756] + *Auditbeat* @@ -91,25 +92,9 @@ fields added to events containing the Beats version. {pull}37553[37553] *Heartbeat* -- Fix panics when parsing dereferencing invalid parsed url. {pull}34702[34702] -- Fix setuid root when running under cgroups v2. {pull}37794[37794] *Metricbeat* -- in module/windows/perfmon, changed collection method of the second counter value required to create a displayable value {pull}32305[32305] -- Fix and improve AWS metric period calculation to avoid zero-length intervals {pull}32724[32724] -- Add missing cluster metadata to k8s module metricsets {pull}32979[32979] {pull}33032[33032] -- Add GCP CloudSQL region filter {pull}32943[32943] -- Fix logstash cgroup mappings {pull}33131[33131] -- Remove unused `elasticsearch.node_stats.indices.bulk.avg_time.bytes` mapping {pull}33263[33263] -- Make generic SQL GA {pull}34637[34637] -- Collect missing remote_cluster in elasticsearch ccr metricset {pull}34957[34957] -- Add context with timeout in AWS API calls {pull}35425[35425] -- Fix EC2 host.cpu.usage {pull}35717[35717] -- Add option in SQL module to execute queries for all dbs. {pull}35688[35688] -- Add remaining dimensions for azure storage account to make them available for tsdb enablement. 
{pull}36331[36331] -- Add log error when statsd server fails to start {pull}36477[36477] -- Fix Azure Resource Metrics missing metrics (min and max aggregations) after upgrade to 8.11.3 {issue}37642[37642] {pull}37643[37643] *Osquerybeat* @@ -120,7 +105,6 @@ fields added to events containing the Beats version. {pull}37553[37553] *Winlogbeat* - *Elastic Logging Plugin* @@ -214,6 +198,10 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d - Add a `/inputs/` route to the HTTP monitoring endpoint that exposes metrics for each metricset instance. {pull}36971[36971] - Add linux IO metrics to system/process {pull}37213[37213] - Add new memory/cgroup metrics to Kibana module {pull}37232[37232] + + +*Metricbeat* + - Update `getOpTimestamp` in `replstatus` to fix sort and temp files generation issue in mongodb. {pull}37688[37688] *Osquerybeat* @@ -311,6 +299,9 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d + + + diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index 47a6f1eaf23f..08da0875d41e 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -8,6 +8,7 @@ This section summarizes the changes in each release. Also read <> for more detail about changes that affect upgrade. 
+* <> * <> * <> * <> From 625ba402b8db1f07fabdf55f8d9276a29d3c2ea7 Mon Sep 17 00:00:00 2001 From: sharbuz <87968844+sharbuz@users.noreply.github.com> Date: Wed, 7 Feb 2024 13:25:21 +0200 Subject: [PATCH 125/129] migrate libbeat pipeline (#37725) --- .buildkite/hooks/pre-command | 2 +- .buildkite/libbeat/pipeline.libbeat.yml | 44 +++++++- .buildkite/metricbeat/pipeline.yml | 16 ++- .buildkite/scripts/common.sh | 81 ++++++------- .../scripts/generate_libbeat_pipeline.sh | 89 +++++++++++++++ .../scripts/generate_metricbeat_pipeline.sh | 106 ++++++++---------- .buildkite/scripts/install_tools.sh | 2 + .buildkite/scripts/py_int_tests.sh | 1 + .buildkite/scripts/stress_tests.sh | 13 +++ catalog-info.yaml | 17 +-- 10 files changed, 254 insertions(+), 117 deletions(-) create mode 100755 .buildkite/scripts/generate_libbeat_pipeline.sh create mode 100755 .buildkite/scripts/stress_tests.sh diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index c448a7102512..d4b33be7690c 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -12,7 +12,7 @@ if [[ "$BUILDKITE_PIPELINE_SLUG" == "filebeat" || "$BUILDKITE_PIPELINE_SLUG" == fi fi -if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" ]]; then +if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" || "$BUILDKITE_PIPELINE_SLUG" == "beats-libbeat" ]]; then source .buildkite/scripts/setenv.sh if [[ "${BUILDKITE_COMMAND}" =~ ^buildkite-agent ]]; then echo "Skipped pre-command when running the Upload pipeline" diff --git a/.buildkite/libbeat/pipeline.libbeat.yml b/.buildkite/libbeat/pipeline.libbeat.yml index 34321b61161b..1fb185b59d19 100644 --- a/.buildkite/libbeat/pipeline.libbeat.yml +++ b/.buildkite/libbeat/pipeline.libbeat.yml @@ -1,5 +1,45 @@ # yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json +env: + IMAGE_UBUNTU_X86_64: "family/core-ubuntu-2204" + IMAGE_UBUNTU_ARM_64: "core-ubuntu-2004-aarch64" + GCP_DEFAULT_MACHINE_TYPE: 
"c2d-highcpu-8" + GCP_HI_PERF_MASHINE_TYPE: "c2d-highcpu-16" + GCP_WIN_MACHINE_TYPE: "n2-standard-8" + AWS_ARM_INSTANCE_TYPE: "t4g.xlarge" + BEATS_PROJECT_NAME: "libbeat" + steps: - - label: "Example test" - command: echo "Hello!" + + - input: "Input Parameters" + key: "input-run-all-stages" + fields: + - select: "Libbeat - runLibbeat" + key: "runLibbeat" + options: + - label: "True" + value: "true" + - label: "False" + value: "false" + default: "false" + - select: "Libbeat - runLibBeatArmTest" + key: "runLibBeatArmTest" + options: + - label: "True" + value: "true" + - label: "False" + value: "false" + default: "false" + if: "build.source == 'ui'" + + - wait: ~ + if: "build.source == 'ui'" + allow_dependency_failure: false + + - label: ":linux: Load dynamic Libbeat pipeline" + key: "libbeat-pipeline" + command: ".buildkite/scripts/generate_libbeat_pipeline.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_DEFAULT_MACHINE_TYPE}" diff --git a/.buildkite/metricbeat/pipeline.yml b/.buildkite/metricbeat/pipeline.yml index 0abc58a85ae5..c42f17d2a363 100644 --- a/.buildkite/metricbeat/pipeline.yml +++ b/.buildkite/metricbeat/pipeline.yml @@ -10,22 +10,27 @@ env: IMAGE_MACOS_X86_64: "generic-13-ventura-x64" GO_AGENT_IMAGE: "golang:${GO_VERSION}" BEATS_PROJECT_NAME: "metricbeat" + GCP_DEFAULT_MACHINE_TYPE: "c2d-highcpu-8" + GCP_HI_PERF_MASHINE_TYPE: "c2d-highcpu-16" + GCP_WIN_MACHINE_TYPE: "n2-standard-8" + AWS_ARM_INSTANCE_TYPE: "t4g.xlarge" + steps: - input: "Input Parameters" - key: "input-run-all-stages" + key: "runMetricbeat" fields: - - select: "Metricbeat - runAllStages" - key: "runAllStages" + - select: "Metricbeat - runMetricbeat" + key: "runMetricbeat" options: - label: "True" value: "true" - label: "False" value: "false" default: "false" - - select: "Metricbeat - runMacOsTests" - key: "UI_MACOS_TESTS" + - select: "Metricbeat - runMetricbeatMacOsTests" + key: "runMetricbeatMacOsTests" options: - label: "True" value: "true" @@ 
-44,3 +49,4 @@ steps: agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_DEFAULT_MACHINE_TYPE}" diff --git a/.buildkite/scripts/common.sh b/.buildkite/scripts/common.sh index a27fa820a7ab..e3dd2ec4ac41 100755 --- a/.buildkite/scripts/common.sh +++ b/.buildkite/scripts/common.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -euo pipefail WORKSPACE=${WORKSPACE:-"$(pwd)"} @@ -8,8 +8,11 @@ platform_type_lowercase=$(echo "$platform_type" | tr '[:upper:]' '[:lower:]') arch_type="$(uname -m)" GITHUB_PR_TRIGGER_COMMENT=${GITHUB_PR_TRIGGER_COMMENT:-""} ONLY_DOCS=${ONLY_DOCS:-"true"} -UI_MACOS_TESTS="$(buildkite-agent meta-data get UI_MACOS_TESTS --default ${UI_MACOS_TESTS:-"false"})" -runAllStages="$(buildkite-agent meta-data get runAllStages --default ${runAllStages:-"false"})" +runLibbeat="$(buildkite-agent meta-data get runLibbeat --default ${runLibbeat:-"false"})" +runMetricbeat="$(buildkite-agent meta-data get runMetricbeat --default ${runMetricbeat:-"false"})" +runLibBeatArmTest="$(buildkite-agent meta-data get runLibbeat --default ${runLibbeat:-"false"})" +runMetricbeatMacOsTests="$(buildkite-agent meta-data get runMetricbeatMacOsTests --default ${runMetricbeatMacOsTests:-"false"})" + metricbeat_changeset=( "^metricbeat/.*" "^go.mod" @@ -175,64 +178,54 @@ are_changed_only_paths() { local changed_files=$(git diff --name-only HEAD@{1} HEAD) if [ -z "$changed_files" ] || grep -qE "$(IFS=\|; echo "${patterns[*]}")" <<< "$changed_files"; then return 0 - else - return 1 fi + return 1 } are_conditions_met_mandatory_tests() { - if [[ "${BUILDKITE_PULL_REQUEST}" == "" ]] || [[ "${runAllStages}" == "true" ]] || [[ "${ONLY_DOCS}" == "false" && "${BUILDKITE_PULL_REQUEST}" != "" ]]; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/Jenkinsfile#L107-L137 - if are_paths_changed "${metricbeat_changeset[@]}" || are_paths_changed "${oss_changeset[@]}" || are_paths_changed "${ci_changeset[@]}" || [[ 
"${GITHUB_PR_TRIGGER_COMMENT}" == "/test metricbeat" ]] || [[ "${GITHUB_PR_LABELS}" =~ Metricbeat ]]; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L3-L12 - return 0 - else - return 1 + if are_paths_changed "${metricbeat_changeset[@]}" || are_paths_changed "${oss_changeset[@]}" || are_paths_changed "${ci_changeset[@]}" ]]; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L3-L12 + if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" ]]; then + if [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test metricbeat" || "${GITHUB_PR_LABELS}" =~ Metricbeat || "${runMetricbeat}" == "true" ]]; then + return 0 + fi + elif [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-libbeat" ]]; then + if [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test libbeat" || "${GITHUB_PR_LABELS}" =~ libbeat || "${runLibbeat}" == "true" ]]; then + return 0 + fi fi - else - return 1 fi + return 1 } -are_conditions_met_extended_tests() { +are_conditions_met_libbeat_arm_tests() { if are_conditions_met_mandatory_tests; then #from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/Jenkinsfile#L145-L171 - return 0 - else - return 1 + if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-libbeat" ]]; then + if [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test libbeat for arm" || "${GITHUB_PR_LABELS}" =~ arm || "${runLibBeatArmTest}" == "true" ]]; then + return 0 + fi + fi fi + return 1 } -are_conditions_met_macos_tests() { - if are_conditions_met_mandatory_tests; then #from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/Jenkinsfile#L145-L171 - if [[ "${UI_MACOS_TESTS}" == true ]] || [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test metricbeat for macos" ]] || [[ "${GITHUB_PR_LABELS}" =~ macOS ]]; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L3-L12 - return 0 - else - return 1 - fi 
- else - return 1 +are_conditions_met_metricbeat_macos_tests() { + if [[ "${runMetricbeatMacOsTests}" == true ]] || [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test metricbeat for macos" ]] || [[ "${GITHUB_PR_LABELS}" =~ macOS ]]; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L3-L12 + return 0 fi + return 1 } -are_conditions_met_extended_windows_tests() { - if [[ "${ONLY_DOCS}" == "false" && "${BUILDKITE_PULL_REQUEST}" != "" ]] || [[ "${runAllStages}" == "true" ]]; then #from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/Jenkinsfile#L145-L171 - if are_paths_changed "${metricbeat_changeset[@]}" || are_paths_changed "${oss_changeset[@]}" || are_paths_changed "${ci_changeset[@]}" || [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test metricbeat" ]] || [[ "${GITHUB_PR_LABELS}" =~ Metricbeat ]]; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L3-L12 - return 0 - else - return 1 - fi - else - return 1 +are_conditions_met_packaging() { + if are_paths_changed "${metricbeat_changeset[@]}" || are_paths_changed "${oss_changeset[@]}" || [[ "${BUILDKITE_TAG}" == "" ]] || [[ "${BUILDKITE_PULL_REQUEST}" != "" ]]; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L101-L103 + return 0 fi + return 1 } -are_conditions_met_packaging() { - if are_conditions_met_extended_windows_tests; then #from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/Jenkinsfile#L145-L171 - if are_paths_changed "${metricbeat_changeset[@]}" || are_paths_changed "${oss_changeset[@]}" || [[ "${BUILDKITE_TAG}" == "" ]] || [[ "${BUILDKITE_PULL_REQUEST}" != "" ]]; then # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L101-L103 - return 0 - else - return 1 - fi - else - return 1 +config_git() { + 
if [ -z "$(git config --get user.email)" ]; then + git config --global user.email "beatsmachine@users.noreply.github.com" + git config --global user.name "beatsmachine" fi } @@ -249,4 +242,4 @@ fi if are_paths_changed "${packaging_changeset[@]}" ; then PACKAGING_CHANGES="true" -fi +fi \ No newline at end of file diff --git a/.buildkite/scripts/generate_libbeat_pipeline.sh b/.buildkite/scripts/generate_libbeat_pipeline.sh new file mode 100755 index 000000000000..6da1bef711dc --- /dev/null +++ b/.buildkite/scripts/generate_libbeat_pipeline.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/common.sh + +set -euo pipefail + +pipelineName="pipeline.libbeat-dynamic.yml" + +echo "Add the mandatory and extended tests without additional conditions into the pipeline" +if are_conditions_met_mandatory_tests; then + cat > $pipelineName <<- YAML + +steps: + + - group: "Mandatory Tests" + key: "mandatory-tests" + steps: + - label: ":linux: Ubuntu Unit Tests" + key: "mandatory-linux-unit-test" + command: ".buildkite/scripts/unit_tests.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_DEFAULT_MACHINE_TYPE}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.xml" + + - label: ":go: Go Integration Tests" + key: "mandatory-int-test" + command: ".buildkite/scripts/go_int_tests.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_HI_PERF_MASHINE_TYPE}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.xml" + + - label: ":python: Python Integration Tests" + key: "mandatory-python-int-test" + command: ".buildkite/scripts/py_int_tests.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_HI_PERF_MASHINE_TYPE}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.xml" + + - label: ":negative_squared_cross_mark: Cross compile" + key: "mandatory-cross-compile" + command: ".buildkite/scripts/crosscompile.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + 
machineType: "${GCP_HI_PERF_MASHINE_TYPE}" + artifact_paths: " ${BEATS_PROJECT_NAME}/build/*.xml" + + - label: ":testengine: Stress Tests" + key: "mandatory-stress-test" + command: ".buildkite/scripts/stress_tests.sh" + agents: + provider: "gcp" + image: "${IMAGE_UBUNTU_X86_64}" + machineType: "${GCP_DEFAULT_MACHINE_TYPE}" + artifact_paths: "${BEATS_PROJECT_NAME}/libbeat-stress-test.xml" + +YAML +fi + +echo "Check and add the Extended Tests into the pipeline" +if are_conditions_met_libbeat_arm_tests; then + cat >> $pipelineName <<- YAML + + - group: "Extended Tests" + key: "extended-tests" + steps: + - label: ":linux: Arm64 Unit Tests" + key: "extended-arm64-unit-tests" + command: ".buildkite/scripts/unit_tests.sh" + agents: + provider: "aws" + imagePrefix: "${IMAGE_UBUNTU_ARM_64}" + instanceType: "${AWS_ARM_INSTANCE_TYPE}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.xml" + +YAML +fi + +echo "--- Printing dynamic steps" #TODO: remove if the pipeline is public +cat $pipelineName + +echo "--- Loading dynamic steps" +buildkite-agent pipeline upload $pipelineName diff --git a/.buildkite/scripts/generate_metricbeat_pipeline.sh b/.buildkite/scripts/generate_metricbeat_pipeline.sh index a15447ba4bf6..0ea19734c4fd 100755 --- a/.buildkite/scripts/generate_metricbeat_pipeline.sh +++ b/.buildkite/scripts/generate_metricbeat_pipeline.sh @@ -6,15 +6,12 @@ set -euo pipefail pipelineName="pipeline.metricbeat-dynamic.yml" -cat > $pipelineName <<- YAML +echo "Add the mandatory and extended tests without additional conditions into the pipeline" +if are_conditions_met_mandatory_tests; then + cat > $pipelineName <<- YAML steps: -YAML - -if are_conditions_met_mandatory_tests; then - cat >> $pipelineName <<- YAML - - group: "Mandatory Tests" key: "mandatory-tests" steps: @@ -24,7 +21,7 @@ if are_conditions_met_mandatory_tests; then agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" - machineType: "c2-standard-16" + machineType: "${GCP_DEFAULT_MACHINE_TYPE}" artifact_paths: 
"${BEATS_PROJECT_NAME}/build/*.*" - label: ":go: Go Intergration Tests" @@ -33,7 +30,7 @@ if are_conditions_met_mandatory_tests; then agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" - machineType: "c2-standard-16" + machineType: "${GCP_HI_PERF_MASHINE_TYPE}" artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" - label: ":python: Python Integration Tests" @@ -42,7 +39,7 @@ if are_conditions_met_mandatory_tests; then agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" - machineType: "c2-standard-16" + machineType: "${GCP_HI_PERF_MASHINE_TYPE}" artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" - label: ":negative_squared_cross_mark: Cross compile" @@ -51,7 +48,7 @@ if are_conditions_met_mandatory_tests; then agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" - machineType: "c2-standard-16" + machineType: "${GCP_DEFAULT_MACHINE_TYPE}" artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" - label: ":windows: Windows 2016/2022 Unit Tests - {{matrix.image}}" @@ -60,7 +57,7 @@ if are_conditions_met_mandatory_tests; then agents: provider: "gcp" image: "{{matrix.image}}" - machine_type: "n2-standard-8" + machine_type: "${GCP_WIN_MACHINE_TYPE}" disk_size: 100 disk_type: "pd-ssd" matrix: @@ -70,30 +67,10 @@ if are_conditions_met_mandatory_tests; then - "${IMAGE_WIN_2022}" artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" -YAML -fi - -if are_conditions_met_extended_tests && are_conditions_met_macos_tests; then - cat >> $pipelineName <<- YAML - - - group: "Extended Tests" - key: "extended-tests" - steps: - - label: ":mac: MacOS Unit Tests" - key: "extended-macos-unit-tests" - command: ".buildkite/scripts/unit_tests.sh" - agents: - provider: "orka" - imagePrefix: "${IMAGE_MACOS_X86_64}" - artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" - -YAML -fi - -if are_conditions_met_extended_windows_tests; then - cat >> $pipelineName <<- YAML +# echo "Add the extended windows tests into the pipeline" +# TODO: ADD conditions from the main pipeline - - group: "Extended Windowds 
Tests" + - group: "Extended Windows Tests" key: "extended-win-tests" steps: - label: ":windows: Windows 2019 Unit Tests" @@ -102,22 +79,22 @@ if are_conditions_met_extended_windows_tests; then agents: provider: "gcp" image: "${IMAGE_WIN_2019}" - machine_type: "n2-standard-8" + machine_type: "${GCP_WIN_MACHINE_TYPE}" disk_size: 100 disk_type: "pd-ssd" artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" # Temporary disabled https://github.com/elastic/beats/issues/37841 - # - label: ":windows: Windows 10 Unit Tests" - # key: "extended-win-10-unit-tests" - # command: ".buildkite/scripts/win_unit_tests.ps1" - # agents: - # provider: "gcp" - # image: "${IMAGE_WIN_10}" - # machine_type: "n2-standard-8" - # disk_size: 100 - # disk_type: "pd-ssd" - # artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" + - label: ":windows: Windows 10 Unit Tests" + key: "extended-win-10-unit-tests" + command: ".buildkite/scripts/win_unit_tests.ps1" + agents: + provider: "gcp" + image: "${IMAGE_WIN_10}" + machine_type: "${GCP_WIN_MACHINE_TYPE}" + disk_size: 100 + disk_type: "pd-ssd" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" - label: ":windows: Windows 11 Unit Tests" key: "extended-win-11-unit-tests" @@ -125,17 +102,40 @@ if are_conditions_met_extended_windows_tests; then agents: provider: "gcp" image: "${IMAGE_WIN_11}" - machine_type: "n2-standard-8" + machine_type: "${GCP_WIN_MACHINE_TYPE}" disk_size: 100 disk_type: "pd-ssd" artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" +YAML +fi + +echo "Check and add the Extended Tests into the pipeline" +if are_conditions_met_metricbeat_macos_tests; then + cat >> $pipelineName <<- YAML + - group: "Extended Tests" + key: "extended-tests" + steps: + - label: ":mac: MacOS Unit Tests" + key: "extended-macos-unit-tests" + command: ".buildkite/scripts/unit_tests.sh" + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*" YAML + fi -if are_conditions_met_extended_windows_tests; then +echo 
"Check and add the Packaging into the pipeline" +if are_conditions_met_mandatory_tests && are_conditions_met_packaging; then cat >> $pipelineName <<- YAML + - wait: ~ + depends_on: + - step: "mandatory-tests" + allow_failure: false + - group: "Packaging" # TODO: check conditions for future the main pipeline migration: https://github.com/elastic/beats/pull/28589 key: "packaging" steps: @@ -145,7 +145,7 @@ if are_conditions_met_extended_windows_tests; then agents: provider: "gcp" image: "${IMAGE_UBUNTU_X86_64}" - machineType: "c2-standard-16" + machineType: "${GCP_HI_PERF_MASHINE_TYPE}" env: PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" @@ -155,19 +155,11 @@ if are_conditions_met_extended_windows_tests; then agents: provider: "aws" imagePrefix: "${IMAGE_UBUNTU_ARM_64}" - instanceType: "t4g.xlarge" + instanceType: "${AWS_ARM_INSTANCE_TYPE}" env: PLATFORMS: "linux/arm64" PACKAGES: "docker" - depends_on: - - step: "mandatory-tests" - allow_failure: false - - step: "extended-tests" - allow_failure: true - - step: "extended-win-tests" - allow_failure: true - YAML fi diff --git a/.buildkite/scripts/install_tools.sh b/.buildkite/scripts/install_tools.sh index 796892341d30..1a1e3a29f54e 100644 --- a/.buildkite/scripts/install_tools.sh +++ b/.buildkite/scripts/install_tools.sh @@ -36,6 +36,8 @@ with_go "${GO_VERSION}" with_mage with_python with_dependencies +config_git +mage dumpVariables #sudo command doesn't work at the "pre-command" hook because of another user environment (root with strange permissions) sudo chmod -R go-w "${BEATS_PROJECT_NAME}/" #TODO: Remove when the issue is solved https://github.com/elastic/beats/issues/37838 diff --git a/.buildkite/scripts/py_int_tests.sh b/.buildkite/scripts/py_int_tests.sh index f43cc2021b5a..19fa8796c3e7 100755 --- a/.buildkite/scripts/py_int_tests.sh +++ b/.buildkite/scripts/py_int_tests.sh @@ -5,6 +5,7 @@ source .buildkite/scripts/install_tools.sh set -euo pipefail echo "--- Run Python 
Intergration Tests for $BEATS_PROJECT_NAME" + pushd "${BEATS_PROJECT_NAME}" > /dev/null mage pythonIntegTest diff --git a/.buildkite/scripts/stress_tests.sh b/.buildkite/scripts/stress_tests.sh new file mode 100755 index 000000000000..b177eb53ea6b --- /dev/null +++ b/.buildkite/scripts/stress_tests.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +source .buildkite/scripts/install_tools.sh + +set -euo pipefail + +echo "--- Run Stress Tests for $BEATS_PROJECT_NAME" + +pushd "${BEATS_PROJECT_NAME}" > /dev/null + +make STRESS_TEST_OPTIONS='-timeout=20m -race -v -parallel 1' GOTEST_OUTPUT_OPTIONS='| go-junit-report > libbeat-stress-test.xml' stress-tests + +popd > /dev/null diff --git a/catalog-info.yaml b/catalog-info.yaml index 037d92e415c8..fb0395d20277 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -99,9 +99,8 @@ spec: cancel_intermediate_builds_branch_filter: '!main !7.* !8.*' skip_intermediate_builds: true skip_intermediate_builds_branch_filter: '!main !7.* !8.*' - # TODO uncomment this environment variable when pipeline definition is updated - # env: - # ELASTIC_PR_COMMENTS_ENABLED: 'true' + env: + ELASTIC_PR_COMMENTS_ENABLED: 'true' teams: ingest-fp: access_level: MANAGE_BUILD_AND_READ @@ -310,9 +309,9 @@ spec: name: beats-libbeat description: "Beats libbeat pipeline" spec: -# branch_configuration: "main 7.17 8.* v7.17 v8.*" TODO: temporarily commented to build PRs from forks + branch_configuration: "main 7.17 8.* pipeline_file: ".buildkite/libbeat/pipeline.libbeat.yml" -# maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline is ready + maximum_timeout_in_minutes: 120 provider_settings: build_pull_request_forks: false build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot @@ -322,11 +321,11 @@ spec: build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null) repository: elastic/beats cancel_intermediate_builds: true - 
cancel_intermediate_builds_branch_filter: "!main !7.17 !8.*" + cancel_intermediate_builds_branch_filter: "!main !7.* !8.*" skip_intermediate_builds: true skip_intermediate_builds_branch_filter: "!main !7.17 !8.*" - # env: - # ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is ready + env: + ELASTIC_PR_COMMENTS_ENABLED: "true" teams: ingest-fp: access_level: MANAGE_BUILD_AND_READ @@ -414,6 +413,8 @@ spec: cancel_intermediate_builds_branch_filter: "!main !7.17 !8.*" skip_intermediate_builds: true skip_intermediate_builds_branch_filter: "!main !7.17 !8.*" + # env: + # ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is ready teams: ingest-fp: access_level: MANAGE_BUILD_AND_READ From 4348b23741f928e49e3cdf88d7f0c940d7161127 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 7 Feb 2024 12:36:36 -0500 Subject: [PATCH 126/129] chore: Update snapshot.yml (#37905) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Made with ❤️️ by updatecli Co-authored-by: apmmachine --- testing/environments/snapshot.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 992e38dd763d..f6eeaaa7382a 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-f56d9bd5-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.13.0-c6fcd738-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.13.0-f56d9bd5-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.13.0-c6fcd738-SNAPSHOT 
healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.13.0-f56d9bd5-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.13.0-c6fcd738-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 7546ae1ceefa19c9a4886ab012fa661eaeaccf4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Chema=20Mart=C3=ADnez?= Date: Wed, 7 Feb 2024 19:09:47 +0100 Subject: [PATCH 127/129] x-pack/libbeat/reader/etw: New reader to collect ETW logs (#36914) Add support for collecting Microsoft ETW events in Libbeat. --- .github/CODEOWNERS | 3 +- CHANGELOG.next.asciidoc | 2 + x-pack/libbeat/Jenkinsfile.yml | 37 ++ x-pack/libbeat/reader/etw/config.go | 16 + x-pack/libbeat/reader/etw/controller.go | 121 +++++++ x-pack/libbeat/reader/etw/controller_test.go | 190 ++++++++++ x-pack/libbeat/reader/etw/event.go | 340 ++++++++++++++++++ x-pack/libbeat/reader/etw/provider.go | 81 +++++ x-pack/libbeat/reader/etw/provider_test.go | 199 ++++++++++ x-pack/libbeat/reader/etw/session.go | 250 +++++++++++++ x-pack/libbeat/reader/etw/session_test.go | 338 +++++++++++++++++ x-pack/libbeat/reader/etw/syscall_advapi32.go | 318 ++++++++++++++++ x-pack/libbeat/reader/etw/syscall_tdh.go | 323 +++++++++++++++++ 13 files changed, 2217 insertions(+), 1 deletion(-) create mode 100644 x-pack/libbeat/reader/etw/config.go create mode 100644 x-pack/libbeat/reader/etw/controller.go create mode 100644 x-pack/libbeat/reader/etw/controller_test.go create mode 100644 x-pack/libbeat/reader/etw/event.go create mode 100644 x-pack/libbeat/reader/etw/provider.go create mode 100644 x-pack/libbeat/reader/etw/provider_test.go create mode 100644 x-pack/libbeat/reader/etw/session.go create mode 100644 x-pack/libbeat/reader/etw/session_test.go create mode 100644 x-pack/libbeat/reader/etw/syscall_advapi32.go create mode 100644 x-pack/libbeat/reader/etw/syscall_tdh.go diff 
--git a/.github/CODEOWNERS b/.github/CODEOWNERS index d3e40d854f57..0df9a9a58779 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -173,6 +173,8 @@ CHANGELOG* /x-pack/filebeat/modules.d/zoom.yml.disabled @elastic/security-service-integrations /x-pack/filebeat/processors/decode_cef/ @elastic/sec-deployment-and-devices /x-pack/heartbeat/ @elastic/obs-ds-hosted-services +/x-pack/libbeat/reader/parquet/ @elastic/security-service-integrations +/x-pack/libbeat/reader/etw/ @elastic/sec-windows-platform /x-pack/metricbeat/ @elastic/elastic-agent-data-plane /x-pack/metricbeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module. /x-pack/metricbeat/module/activemq @elastic/obs-infraobs-integrations @@ -219,4 +221,3 @@ CHANGELOG* /x-pack/osquerybeat/ @elastic/sec-deployment-and-devices /x-pack/packetbeat/ @elastic/sec-linux-platform /x-pack/winlogbeat/ @elastic/sec-windows-platform -/x-pack/libbeat/reader/parquet/ @elastic/security-service-integrations diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 645409067f14..8281f7b79ecb 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -181,6 +181,8 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d *Libbeat* - Add watcher that can be used to monitor Linux kernel events. {pull}37833[37833] +- Added support for ETW reader. {pull}36914[36914] + *Heartbeat* - Added status to monitor run log report. - Upgrade github.com/elastic/go-elasticsearch/v8 to v8.12.0. 
{pull}37673[37673] diff --git a/x-pack/libbeat/Jenkinsfile.yml b/x-pack/libbeat/Jenkinsfile.yml index 9d4ecfa7bd08..9947fd0096c6 100644 --- a/x-pack/libbeat/Jenkinsfile.yml +++ b/x-pack/libbeat/Jenkinsfile.yml @@ -27,6 +27,43 @@ stages: branches: true ## for all the branches tags: true ## for all the tags stage: extended + ## For now Windows CI tests for Libbeat are only enabled for ETW + ## It only contains Go tests + windows-2022: + mage: "mage -w reader/etw build goUnitTest" + platforms: ## override default labels in this specific stage. + - "windows-2022" + stage: mandatory + windows-2019: + mage: "mage -w reader/etw build goUnitTest" + platforms: ## override default labels in this specific stage. + - "windows-2019" + stage: extended_win + windows-2016: + mage: "mage -w reader/etw build goUnitTest" + platforms: ## override default labels in this specific stage. + - "windows-2016" + stage: mandatory + windows-2012: + mage: "mage -w reader/etw build goUnitTest" + platforms: ## override default labels in this specific stage. + - "windows-2012-r2" + stage: extended_win + windows-11: + mage: "mage -w reader/etw build goUnitTest" + platforms: ## override default labels in this specific stage. + - "windows-11" + stage: extended_win + windows-10: + mage: "mage -w reader/etw build goUnitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + stage: extended_win + windows-8: + mage: "mage -w reader/etw build goUnitTest" + platforms: ## override default labels in this specific stage. + - "windows-8" + stage: extended_win unitTest: mage: "mage build unitTest" stage: mandatory diff --git a/x-pack/libbeat/reader/etw/config.go b/x-pack/libbeat/reader/etw/config.go new file mode 100644 index 000000000000..44f9e68ff2d0 --- /dev/null +++ b/x-pack/libbeat/reader/etw/config.go @@ -0,0 +1,16 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package etw + +type Config struct { + Logfile string // Path to the logfile + ProviderGUID string // GUID of the ETW provider + ProviderName string // Name of the ETW provider + SessionName string // Name for new ETW session + TraceLevel string // Level of tracing (e.g., "verbose") + MatchAnyKeyword uint64 // Filter for any matching keywords (bitmask) + MatchAllKeyword uint64 // Filter for all matching keywords (bitmask) + Session string // Existing session to attach +} diff --git a/x-pack/libbeat/reader/etw/controller.go b/x-pack/libbeat/reader/etw/controller.go new file mode 100644 index 000000000000..f17866440cfc --- /dev/null +++ b/x-pack/libbeat/reader/etw/controller.go @@ -0,0 +1,121 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package etw + +import ( + "errors" + "fmt" + "syscall" +) + +// AttachToExistingSession queries the status of an existing ETW session. +// On success, it updates the Session's handler with the queried information. +func (s *Session) AttachToExistingSession() error { + // Convert the session name to UTF16 for Windows API compatibility. + sessionNamePtr, err := syscall.UTF16PtrFromString(s.Name) + if err != nil { + return fmt.Errorf("failed to convert session name: %w", err) + } + + // Query the current state of the ETW session. + err = s.controlTrace(0, sessionNamePtr, s.properties, EVENT_TRACE_CONTROL_QUERY) + switch { + case err == nil: + // Get the session handler from the properties struct. + s.handler = uintptr(s.properties.Wnode.Union1) + + return nil + + // Handle specific errors related to the query operation. 
+ case errors.Is(err, ERROR_BAD_LENGTH): + return fmt.Errorf("bad length when querying handler: %w", err) + case errors.Is(err, ERROR_INVALID_PARAMETER): + return fmt.Errorf("invalid parameters when querying handler: %w", err) + case errors.Is(err, ERROR_WMI_INSTANCE_NOT_FOUND): + return fmt.Errorf("session is not running: %w", err) + default: + return fmt.Errorf("failed to get handler: %w", err) + } +} + +// CreateRealtimeSession initializes and starts a new real-time ETW session. +func (s *Session) CreateRealtimeSession() error { + // Convert the session name to UTF16 format for Windows API compatibility. + sessionPtr, err := syscall.UTF16PtrFromString(s.Name) + if err != nil { + return fmt.Errorf("failed to convert session name: %w", err) + } + + // Start the ETW trace session. + err = s.startTrace(&s.handler, sessionPtr, s.properties) + switch { + case err == nil: + + // Handle specific errors related to starting the trace session. + case errors.Is(err, ERROR_ALREADY_EXISTS): + return fmt.Errorf("session already exists: %w", err) + case errors.Is(err, ERROR_INVALID_PARAMETER): + return fmt.Errorf("invalid parameters when starting session trace: %w", err) + default: + return fmt.Errorf("failed to start trace: %w", err) + } + + // Set additional parameters for trace enabling. + // See https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-enable_trace_parameters#members + params := EnableTraceParameters{ + Version: 2, // ENABLE_TRACE_PARAMETERS_VERSION_2 + } + + // Zero timeout means asynchronous enablement + const timeout = 0 + + // Enable the trace session with extended options. + err = s.enableTrace(s.handler, &s.GUID, EVENT_CONTROL_CODE_ENABLE_PROVIDER, s.traceLevel, s.matchAnyKeyword, s.matchAllKeyword, timeout, ¶ms) + switch { + case err == nil: + return nil + // Handle specific errors related to enabling the trace session. 
+ case errors.Is(err, ERROR_INVALID_PARAMETER): + return fmt.Errorf("invalid parameters when enabling session trace: %w", err) + case errors.Is(err, ERROR_TIMEOUT): + return fmt.Errorf("timeout value expired before the enable callback completed: %w", err) + case errors.Is(err, ERROR_NO_SYSTEM_RESOURCES): + return fmt.Errorf("exceeded the number of trace sessions that can enable the provider: %w", err) + default: + return fmt.Errorf("failed to enable trace: %w", err) + } +} + +// StopSession closes the ETW session and associated handles if they were created. +func (s *Session) StopSession() error { + if !s.Realtime { + return nil + } + + if isValidHandler(s.traceHandler) { + // Attempt to close the trace and handle potential errors. + if err := s.closeTrace(s.traceHandler); err != nil && !errors.Is(err, ERROR_CTX_CLOSE_PENDING) { + return fmt.Errorf("failed to close trace: %w", err) + } + } + + if s.NewSession { + // If we created the session, send a control command to stop it. + return s.controlTrace( + s.handler, + nil, + s.properties, + EVENT_TRACE_CONTROL_STOP, + ) + } + + return nil +} + +func isValidHandler(handler uint64) bool { + return handler != 0 && handler != INVALID_PROCESSTRACE_HANDLE +} diff --git a/x-pack/libbeat/reader/etw/controller_test.go b/x-pack/libbeat/reader/etw/controller_test.go new file mode 100644 index 000000000000..0c663433ad1f --- /dev/null +++ b/x-pack/libbeat/reader/etw/controller_test.go @@ -0,0 +1,190 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build windows + +package etw + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "golang.org/x/sys/windows" +) + +func TestAttachToExistingSession_Error(t *testing.T) { + // Mock implementation of controlTrace + controlTrace := func(traceHandle uintptr, + instanceName *uint16, + properties *EventTraceProperties, + controlCode uint32) error { + return ERROR_WMI_INSTANCE_NOT_FOUND + } + + // Create a Session instance + session := &Session{ + Name: "TestSession", + properties: &EventTraceProperties{}, + controlTrace: controlTrace, + } + + err := session.AttachToExistingSession() + assert.EqualError(t, err, "session is not running: The instance name passed was not recognized as valid by a WMI data provider.") +} + +func TestAttachToExistingSession_Success(t *testing.T) { + // Mock implementation of controlTrace + controlTrace := func(traceHandle uintptr, + instanceName *uint16, + properties *EventTraceProperties, + controlCode uint32) error { + // Set a mock handler value + properties.Wnode.Union1 = 12345 + return nil + } + + // Create a Session instance with initialized Properties + session := &Session{ + Name: "TestSession", + properties: &EventTraceProperties{}, + controlTrace: controlTrace, + } + + err := session.AttachToExistingSession() + + assert.NoError(t, err) + assert.Equal(t, uintptr(12345), session.handler, "Handler should be set to the mock value") +} + +func TestCreateRealtimeSession_StartTraceError(t *testing.T) { + // Mock implementation of startTrace + startTrace := func(traceHandle *uintptr, + instanceName *uint16, + properties *EventTraceProperties) error { + return ERROR_ALREADY_EXISTS + } + + // Create a Session instance + session := &Session{ + Name: "TestSession", + properties: &EventTraceProperties{}, + startTrace: startTrace, + } + + err := session.CreateRealtimeSession() + assert.EqualError(t, err, "session already exists: Cannot create a file when that file already exists.") +} + +func 
TestCreateRealtimeSession_EnableTraceError(t *testing.T) { + // Mock implementations + startTrace := func(traceHandle *uintptr, + instanceName *uint16, + properties *EventTraceProperties) error { + *traceHandle = 12345 // Mock handler value + return nil + } + + enableTrace := func(traceHandle uintptr, + providerId *windows.GUID, + isEnabled uint32, + level uint8, + matchAnyKeyword uint64, + matchAllKeyword uint64, + enableProperty uint32, + enableParameters *EnableTraceParameters) error { + return ERROR_INVALID_PARAMETER + } + + // Create a Session instance + session := &Session{ + Name: "TestSession", + properties: &EventTraceProperties{}, + startTrace: startTrace, + enableTrace: enableTrace, + } + + err := session.CreateRealtimeSession() + assert.EqualError(t, err, "invalid parameters when enabling session trace: The parameter is incorrect.") +} + +func TestCreateRealtimeSession_Success(t *testing.T) { + // Mock implementations + startTrace := func(traceHandle *uintptr, + instanceName *uint16, + properties *EventTraceProperties) error { + *traceHandle = 12345 // Mock handler value + return nil + } + + enableTrace := func(traceHandle uintptr, + providerId *windows.GUID, + isEnabled uint32, + level uint8, + matchAnyKeyword uint64, + matchAllKeyword uint64, + enableProperty uint32, + enableParameters *EnableTraceParameters) error { + return nil + } + + // Create a Session instance + session := &Session{ + Name: "TestSession", + properties: &EventTraceProperties{}, + startTrace: startTrace, + enableTrace: enableTrace, + } + + err := session.CreateRealtimeSession() + + assert.NoError(t, err) + assert.Equal(t, uintptr(12345), session.handler, "Handler should be set to the mock value") +} + +func TestStopSession_Error(t *testing.T) { + // Mock implementation of closeTrace + closeTrace := func(traceHandle uint64) error { + return ERROR_INVALID_PARAMETER + } + + // Create a Session instance + session := &Session{ + Realtime: true, + NewSession: true, + traceHandler: 
12345, // Example handler value + properties: &EventTraceProperties{}, + closeTrace: closeTrace, + } + + err := session.StopSession() + assert.EqualError(t, err, "failed to close trace: The parameter is incorrect.") +} + +func TestStopSession_Success(t *testing.T) { + // Mock implementations + closeTrace := func(traceHandle uint64) error { + return nil + } + + controlTrace := func(traceHandle uintptr, + instanceName *uint16, + properties *EventTraceProperties, + controlCode uint32) error { + // Set a mock handler value + return nil + } + + // Create a Session instance + session := &Session{ + Realtime: true, + NewSession: true, + traceHandler: 12345, // Example handler value + properties: &EventTraceProperties{}, + closeTrace: closeTrace, + controlTrace: controlTrace, + } + + err := session.StopSession() + assert.NoError(t, err) +} diff --git a/x-pack/libbeat/reader/etw/event.go b/x-pack/libbeat/reader/etw/event.go new file mode 100644 index 000000000000..34faa8d21cb7 --- /dev/null +++ b/x-pack/libbeat/reader/etw/event.go @@ -0,0 +1,340 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package etw + +import ( + "errors" + "fmt" + "unsafe" + + "golang.org/x/sys/windows" +) + +// propertyParser is used for parsing properties from raw EVENT_RECORD structures. +type propertyParser struct { + r *EventRecord + info *TraceEventInfo + data []byte + ptrSize uint32 +} + +// GetEventProperties extracts and returns properties from an ETW event record. +func GetEventProperties(r *EventRecord) (map[string]interface{}, error) { + // Handle the case where the event only contains a string. 
+ if r.EventHeader.Flags == EVENT_HEADER_FLAG_STRING_ONLY { + userDataPtr := (*uint16)(unsafe.Pointer(r.UserData)) + return map[string]interface{}{ + "_": utf16AtOffsetToString(uintptr(unsafe.Pointer(userDataPtr)), 0), // Convert the user data from UTF16 to string. + }, nil + } + + // Initialize a new property parser for the event record. + p, err := newPropertyParser(r) + if err != nil { + return nil, fmt.Errorf("failed to parse event properties: %w", err) + } + + // Iterate through each property of the event and format it + properties := make(map[string]interface{}, int(p.info.TopLevelPropertyCount)) + for i := 0; i < int(p.info.TopLevelPropertyCount); i++ { + name := p.getPropertyName(i) + value, err := p.getPropertyValue(i) + if err != nil { + return nil, fmt.Errorf("failed to parse %q value: %w", name, err) + } + properties[name] = value + } + + return properties, nil +} + +// newPropertyParser initializes a new property parser for a given event record. +func newPropertyParser(r *EventRecord) (*propertyParser, error) { + info, err := getEventInformation(r) + if err != nil { + return nil, fmt.Errorf("failed to get event information: %w", err) + } + ptrSize := r.pointerSize() + // Return a new propertyParser instance initialized with event record data and metadata. + return &propertyParser{ + r: r, + info: info, + ptrSize: ptrSize, + data: unsafe.Slice((*uint8)(unsafe.Pointer(r.UserData)), r.UserDataLength), + }, nil +} + +// getEventPropertyInfoAtIndex looks for the EventPropertyInfo object at a specified index. +func (info *TraceEventInfo) getEventPropertyInfoAtIndex(i uint32) *EventPropertyInfo { + if i < info.PropertyCount { + // Calculate the address of the first element in EventPropertyInfoArray. + eventPropertyInfoPtr := uintptr(unsafe.Pointer(&info.EventPropertyInfoArray[0])) + // Adjust the pointer to point to the i-th EventPropertyInfo element. 
+ eventPropertyInfoPtr += uintptr(i) * unsafe.Sizeof(EventPropertyInfo{}) + + return ((*EventPropertyInfo)(unsafe.Pointer(eventPropertyInfoPtr))) + } + return nil +} + +// getEventInformation retrieves detailed metadata about an event record. +func getEventInformation(r *EventRecord) (info *TraceEventInfo, err error) { + // Initially call TdhGetEventInformation to get the required buffer size. + var bufSize uint32 + if err = _TdhGetEventInformation(r, 0, nil, nil, &bufSize); errors.Is(err, ERROR_INSUFFICIENT_BUFFER) { + // Allocate enough memory for TRACE_EVENT_INFO based on the required size. + buff := make([]byte, bufSize) + info = ((*TraceEventInfo)(unsafe.Pointer(&buff[0]))) + // Retrieve the event information into the allocated buffer. + err = _TdhGetEventInformation(r, 0, nil, info, &bufSize) + } + + // Check for errors in retrieving the event information. + if err != nil { + return nil, fmt.Errorf("TdhGetEventInformation failed: %w", err) + } + + return info, nil +} + +// getPropertyName retrieves the name of the i-th event property in the event record. +func (p *propertyParser) getPropertyName(i int) string { + // Convert the UTF16 property name to a Go string. + namePtr := readPropertyName(p, i) + return windows.UTF16PtrToString((*uint16)(namePtr)) +} + +// readPropertyName gets the pointer to the property name in the event information structure. +func readPropertyName(p *propertyParser, i int) unsafe.Pointer { + // Calculate the pointer to the property name using its offset in the event property array. + return unsafe.Add(unsafe.Pointer(p.info), p.info.getEventPropertyInfoAtIndex(uint32(i)).NameOffset) +} + +// getPropertyValue retrieves the value of a specified event property. +func (p *propertyParser) getPropertyValue(i int) (interface{}, error) { + propertyInfo := p.info.getEventPropertyInfoAtIndex(uint32(i)) + + // Determine the size of the property array. 
+ arraySize, err := p.getArraySize(*propertyInfo) + if err != nil { + return nil, fmt.Errorf("failed to get array size: %w", err) + } + + // Initialize a slice to hold the property values. + result := make([]interface{}, arraySize) + for j := 0; j < int(arraySize); j++ { + var ( + value interface{} + err error + ) + // Parse the property value based on its type (simple or structured). + if (propertyInfo.Flags & PropertyStruct) == PropertyStruct { + value, err = p.parseStruct(*propertyInfo) + } else { + value, err = p.parseSimpleType(*propertyInfo) + } + if err != nil { + return nil, err + } + result[j] = value + } + + // Return the entire result set or the single value, based on the property count. + if ((propertyInfo.Flags & PropertyParamCount) == PropertyParamCount) || + (propertyInfo.count() > 1) { + return result, nil + } + return result[0], nil +} + +// getArraySize calculates the size of an array property within an event. +func (p *propertyParser) getArraySize(propertyInfo EventPropertyInfo) (uint32, error) { + // Check if the property's count is specified by another property. + if (propertyInfo.Flags & PropertyParamCount) == PropertyParamCount { + var dataDescriptor PropertyDataDescriptor + // Locate the property containing the array size using the countPropertyIndex. + dataDescriptor.PropertyName = readPropertyName(p, int(propertyInfo.count())) + dataDescriptor.ArrayIndex = 0xFFFFFFFF + // Retrieve the length of the array from the specified property. + return getLengthFromProperty(p.r, &dataDescriptor) + } else { + // If the array size is directly specified, return it. + return uint32(propertyInfo.count()), nil + } +} + +// getLengthFromProperty retrieves the length of a property from an event record. +func getLengthFromProperty(r *EventRecord, dataDescriptor *PropertyDataDescriptor) (uint32, error) { + var length uint32 + // Call TdhGetProperty to get the length of the property specified by the dataDescriptor. 
+ err := _TdhGetProperty( + r, + 0, + nil, + 1, + dataDescriptor, + uint32(unsafe.Sizeof(length)), + (*byte)(unsafe.Pointer(&length)), + ) + if err != nil { + return 0, err + } + return length, nil +} + +// parseStruct extracts and returns the fields from an embedded structure within a property. +func (p *propertyParser) parseStruct(propertyInfo EventPropertyInfo) (map[string]interface{}, error) { + // Determine the start and end indexes of the structure members within the property info. + startIndex := propertyInfo.structStartIndex() + lastIndex := startIndex + propertyInfo.numOfStructMembers() + + // Initialize a map to hold the structure's fields. + structure := make(map[string]interface{}, (lastIndex - startIndex)) + // Iterate through each member of the structure. + for j := startIndex; j < lastIndex; j++ { + name := p.getPropertyName(int(j)) + value, err := p.getPropertyValue(int(j)) + if err != nil { + return nil, fmt.Errorf("failed parse field '%s' of complex property type: %w", name, err) + } + structure[name] = value // Add the field to the structure map. + } + + return structure, nil +} + +// parseSimpleType parses a simple property type using TdhFormatProperty. +func (p *propertyParser) parseSimpleType(propertyInfo EventPropertyInfo) (string, error) { + var mapInfo *EventMapInfo + if propertyInfo.mapNameOffset() > 0 { + // If failed retrieving the map information, returns on error + var err error + mapInfo, err = p.getMapInfo(propertyInfo) + if err != nil { + return "", fmt.Errorf("failed to get map information due to: %w", err) + } + } + + // Get the length of the property. + propertyLength, err := p.getPropertyLength(propertyInfo) + if err != nil { + return "", fmt.Errorf("failed to get property length due to: %w", err) + } + + var userDataConsumed uint16 + + // Set a default buffer size for formatted data. 
+ formattedDataSize := uint32(DEFAULT_PROPERTY_BUFFER_SIZE) + formattedData := make([]byte, int(formattedDataSize)) + + // Retry loop to handle buffer size adjustments. +retryLoop: + for { + var dataPtr *uint8 + if len(p.data) > 0 { + dataPtr = &p.data[0] + } + err := _TdhFormatProperty( + p.info, + mapInfo, + p.ptrSize, + propertyInfo.inType(), + propertyInfo.outType(), + uint16(propertyLength), + uint16(len(p.data)), + dataPtr, + &formattedDataSize, + &formattedData[0], + &userDataConsumed, + ) + + switch { + case err == nil: + // If formatting is successful, break out of the loop. + break retryLoop + case errors.Is(err, ERROR_INSUFFICIENT_BUFFER): + // Increase the buffer size if it's insufficient. + formattedData = make([]byte, formattedDataSize) + continue + case errors.Is(err, ERROR_EVT_INVALID_EVENT_DATA): + // Handle invalid event data error. + // Discarding MapInfo allows us to access + // at least the non-interpreted data. + if mapInfo != nil { + mapInfo = nil + continue + } + return "", fmt.Errorf("TdhFormatProperty failed: %w", err) // Handle unknown error + default: + return "", fmt.Errorf("TdhFormatProperty failed: %w", err) + } + } + // Update the data slice to account for consumed data. + p.data = p.data[userDataConsumed:] + + // Convert the formatted data to string and return. + return windows.UTF16PtrToString((*uint16)(unsafe.Pointer(&formattedData[0]))), nil +} + +// getMapInfo retrieves mapping information for a given property. +func (p *propertyParser) getMapInfo(propertyInfo EventPropertyInfo) (*EventMapInfo, error) { + var mapSize uint32 + // Get the name of the map from the property info. + mapName := (*uint16)(unsafe.Add(unsafe.Pointer(p.info), propertyInfo.mapNameOffset())) + + // First call to get the required size of the map info. + err := _TdhGetEventMapInformation(p.r, mapName, nil, &mapSize) + switch { + case errors.Is(err, ERROR_NOT_FOUND): + // No mapping information available. This is not an error. 
+ return nil, nil + case errors.Is(err, ERROR_INSUFFICIENT_BUFFER): + // Resize the buffer and try again. + default: + return nil, fmt.Errorf("TdhGetEventMapInformation failed to get size: %w", err) + } + + // Allocate buffer and retrieve the actual map information. + buff := make([]byte, int(mapSize)) + mapInfo := ((*EventMapInfo)(unsafe.Pointer(&buff[0]))) + err = _TdhGetEventMapInformation(p.r, mapName, mapInfo, &mapSize) + if err != nil { + return nil, fmt.Errorf("TdhGetEventMapInformation failed: %w", err) + } + + if mapInfo.EntryCount == 0 { + return nil, nil // No entries in the map. + } + + return mapInfo, nil +} + +// getPropertyLength returns the length of a specific property within TraceEventInfo. +func (p *propertyParser) getPropertyLength(propertyInfo EventPropertyInfo) (uint32, error) { + // Check if the length of the property is defined by another property. + if (propertyInfo.Flags & PropertyParamLength) == PropertyParamLength { + var dataDescriptor PropertyDataDescriptor + // Read the property name that contains the length information. + dataDescriptor.PropertyName = readPropertyName(p, int(propertyInfo.length())) + dataDescriptor.ArrayIndex = 0xFFFFFFFF + // Retrieve the length from the specified property. + return getLengthFromProperty(p.r, &dataDescriptor) + } + + inType := propertyInfo.inType() + outType := propertyInfo.outType() + // Special handling for properties representing IPv6 addresses. + // https://docs.microsoft.com/en-us/windows/win32/api/tdh/nf-tdh-tdhformatproperty#remarks + if TdhIntypeBinary == inType && TdhOuttypeIpv6 == outType { + // Return the fixed size of an IPv6 address. + return 16, nil + } + + // Default case: return the length as defined in the property info. + // Note: A length of 0 can indicate a variable-length field (e.g., structure, string). 
+ return uint32(propertyInfo.length()), nil +} diff --git a/x-pack/libbeat/reader/etw/provider.go b/x-pack/libbeat/reader/etw/provider.go new file mode 100644 index 000000000000..e0a20c3facd1 --- /dev/null +++ b/x-pack/libbeat/reader/etw/provider.go @@ -0,0 +1,81 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package etw + +import ( + "errors" + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +// utf16AtOffsetToString converts a UTF-16 encoded string +// at a specific offset in a struct to a Go string. +func utf16AtOffsetToString(pstruct uintptr, offset uintptr) string { + // Initialize a slice to store UTF-16 characters. + out := make([]uint16, 0, 64) + + // Start reading at the given offset. + wc := (*uint16)(unsafe.Pointer(pstruct + offset)) + + // Iterate over the UTF-16 characters until a null terminator is encountered. + for i := uintptr(2); *wc != 0; i += 2 { + out = append(out, *wc) + wc = (*uint16)(unsafe.Pointer(pstruct + offset + i)) + } + + // Convert the UTF-16 slice to a Go string and return. + return syscall.UTF16ToString(out) +} + +// guidFromProviderName searches for a provider by name and returns its GUID. +func guidFromProviderName(providerName string) (windows.GUID, error) { + // Returns if the provider name is empty. + if providerName == "" { + return windows.GUID{}, fmt.Errorf("empty provider name") + } + + var buf *ProviderEnumerationInfo + size := uint32(1) + + // Attempt to retrieve provider information with a buffer that increases in size until it's sufficient. 
+ for { + tmp := make([]byte, size) + buf = (*ProviderEnumerationInfo)(unsafe.Pointer(&tmp[0])) + if err := enumerateProvidersFunc(buf, &size); !errors.Is(err, ERROR_INSUFFICIENT_BUFFER) { + break + } + } + + if buf.NumberOfProviders == 0 { + return windows.GUID{}, fmt.Errorf("no providers found") + } + + // Iterate through the list of providers to find a match by name. + startProvEnumInfo := uintptr(unsafe.Pointer(buf)) + it := uintptr(unsafe.Pointer(&buf.TraceProviderInfoArray[0])) + for i := uintptr(0); i < uintptr(buf.NumberOfProviders); i++ { + pInfo := (*TraceProviderInfo)(unsafe.Pointer(it + i*unsafe.Sizeof(buf.TraceProviderInfoArray[0]))) + name := utf16AtOffsetToString(startProvEnumInfo, uintptr(pInfo.ProviderNameOffset)) + + // If a match is found, return the corresponding GUID. + if name == providerName { + return pInfo.ProviderGuid, nil + } + } + + // No matching provider is found. + return windows.GUID{}, fmt.Errorf("unable to find GUID from provider name") +} + +// IsGUIDValid checks if GUID contains valid data +// (any of the fields in the GUID are non-zero) +func IsGUIDValid(guid windows.GUID) bool { + return guid.Data1 != 0 || guid.Data2 != 0 || guid.Data3 != 0 || guid.Data4 != [8]byte{} +} diff --git a/x-pack/libbeat/reader/etw/provider_test.go b/x-pack/libbeat/reader/etw/provider_test.go new file mode 100644 index 000000000000..d8c561ef3e4f --- /dev/null +++ b/x-pack/libbeat/reader/etw/provider_test.go @@ -0,0 +1,199 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package etw + +import ( + "encoding/binary" + "syscall" + "testing" + "unsafe" + + "github.com/stretchr/testify/assert" + "golang.org/x/sys/windows" +) + +func TestUTF16AtOffsetToString(t *testing.T) { + // Create a UTF-16 string + sampleText := "This is a string test!" 
+ utf16Str, _ := syscall.UTF16FromString(sampleText) + + // Convert it to uintptr (simulate as if it's part of a larger struct) + ptr := uintptr(unsafe.Pointer(&utf16Str[0])) + + // Test the function + result := utf16AtOffsetToString(ptr, 0) + assert.Equal(t, sampleText, result, "The converted string should match the original") + + // Test with offset (skip the first character) + offset := unsafe.Sizeof(utf16Str[0]) // Size of one UTF-16 character + resultWithOffset := utf16AtOffsetToString(ptr, offset) + assert.Equal(t, sampleText[1:], resultWithOffset, "The converted string with offset should skip the first character") +} + +func TestGUIDFromProviderName_EmptyName(t *testing.T) { + guid, err := guidFromProviderName("") + assert.EqualError(t, err, "empty provider name") + assert.Equal(t, windows.GUID{}, guid, "GUID should be empty for an empty provider name") +} + +func TestGUIDFromProviderName_EmptyProviderList(t *testing.T) { + // Defer restoration of the original function + t.Cleanup(func() { + enumerateProvidersFunc = _TdhEnumerateProviders + }) + + // Define a mock provider name and GUID for testing. 
+ mockProviderName := "NonExistentProvider" + + enumerateProvidersFunc = func(pBuffer *ProviderEnumerationInfo, pBufferSize *uint32) error { + // Check if the buffer size is sufficient + requiredSize := uint32(unsafe.Sizeof(ProviderEnumerationInfo{})) + uint32(unsafe.Sizeof(TraceProviderInfo{}))*0 // As there are no providers + if *pBufferSize < requiredSize { + // Set the size required and return the error + *pBufferSize = requiredSize + return ERROR_INSUFFICIENT_BUFFER + } + + // Empty list of providers + *pBuffer = ProviderEnumerationInfo{ + NumberOfProviders: 0, + TraceProviderInfoArray: [anysizeArray]TraceProviderInfo{}, + } + return nil + } + + guid, err := guidFromProviderName(mockProviderName) + assert.EqualError(t, err, "no providers found") + assert.Equal(t, windows.GUID{}, guid, "GUID should be empty when the provider is not found") +} + +func TestGUIDFromProviderName_GUIDNotFound(t *testing.T) { + // Defer restoration of the original function + t.Cleanup(func() { + enumerateProvidersFunc = _TdhEnumerateProviders + }) + + // Define a mock provider name and GUID for testing. 
+ mockProviderName := "NonExistentProvider" + realProviderName := "ExistentProvider" + mockGUID := windows.GUID{Data1: 1234, Data2: 5678} + + enumerateProvidersFunc = func(pBuffer *ProviderEnumerationInfo, pBufferSize *uint32) error { + // Convert provider name to UTF-16 + utf16ProviderName, _ := syscall.UTF16FromString(realProviderName) + + // Calculate size needed for the provider name string + nameSize := (len(utf16ProviderName) + 1) * 2 // +1 for null-terminator + + requiredSize := uint32(unsafe.Sizeof(ProviderEnumerationInfo{})) + uint32(unsafe.Sizeof(TraceProviderInfo{})) + uint32(nameSize) + if *pBufferSize < requiredSize { + *pBufferSize = requiredSize + return ERROR_INSUFFICIENT_BUFFER + } + + // Calculate the offset for the provider name + // It's placed after ProviderEnumerationInfo and TraceProviderInfo + nameOffset := unsafe.Sizeof(ProviderEnumerationInfo{}) + unsafe.Sizeof(TraceProviderInfo{}) + + // Convert pBuffer to a byte slice starting at the calculated offset for the name + byteBuffer := (*[1 << 30]byte)(unsafe.Pointer(pBuffer))[:] + // Copy the UTF-16 encoded name into the buffer + for i, char := range utf16ProviderName { + binary.LittleEndian.PutUint16(byteBuffer[nameOffset+(uintptr(i)*2):], char) + } + + // Create and populate the ProviderEnumerationInfo struct + *pBuffer = ProviderEnumerationInfo{ + NumberOfProviders: 1, + TraceProviderInfoArray: [anysizeArray]TraceProviderInfo{ + { + ProviderGuid: mockGUID, + ProviderNameOffset: uint32(nameOffset), + }, + }, + } + return nil + } + + guid, err := guidFromProviderName(mockProviderName) + assert.EqualError(t, err, "unable to find GUID from provider name") + assert.Equal(t, windows.GUID{}, guid, "GUID should be empty when the provider is not found") +} + +func TestGUIDFromProviderName_Success(t *testing.T) { + // Defer restoration of the original function + t.Cleanup(func() { + enumerateProvidersFunc = _TdhEnumerateProviders + }) + + // Define a mock provider name and GUID for testing. 
+ mockProviderName := "MockProvider" + mockGUID := windows.GUID{Data1: 1234, Data2: 5678} + + enumerateProvidersFunc = func(pBuffer *ProviderEnumerationInfo, pBufferSize *uint32) error { + // Convert provider name to UTF-16 + utf16ProviderName, _ := syscall.UTF16FromString(mockProviderName) + + // Calculate size needed for the provider name string + nameSize := (len(utf16ProviderName) + 1) * 2 // +1 for null-terminator + + requiredSize := uint32(unsafe.Sizeof(ProviderEnumerationInfo{})) + uint32(unsafe.Sizeof(TraceProviderInfo{})) + uint32(nameSize) + if *pBufferSize < requiredSize { + *pBufferSize = requiredSize + return ERROR_INSUFFICIENT_BUFFER + } + + // Calculate the offset for the provider name + // It's placed after ProviderEnumerationInfo and TraceProviderInfo + nameOffset := unsafe.Sizeof(ProviderEnumerationInfo{}) + unsafe.Sizeof(TraceProviderInfo{}) + + // Convert pBuffer to a byte slice starting at the calculated offset for the name + byteBuffer := (*[1 << 30]byte)(unsafe.Pointer(pBuffer))[:] + // Copy the UTF-16 encoded name into the buffer + for i, char := range utf16ProviderName { + binary.LittleEndian.PutUint16(byteBuffer[nameOffset+(uintptr(i)*2):], char) + } + + // Create and populate the ProviderEnumerationInfo struct + *pBuffer = ProviderEnumerationInfo{ + NumberOfProviders: 1, + TraceProviderInfoArray: [anysizeArray]TraceProviderInfo{ + { + ProviderGuid: mockGUID, + ProviderNameOffset: uint32(nameOffset), + }, + }, + } + return nil + } + + // Run the test + guid, err := guidFromProviderName(mockProviderName) + assert.NoError(t, err) + assert.Equal(t, mockGUID, guid, "GUID should match the mock GUID") +} + +func TestIsGUIDValid_True(t *testing.T) { + // Valid GUID + validGUID := windows.GUID{ + Data1: 0xeb79061a, + Data2: 0xa566, + Data3: 0x4698, + Data4: [8]byte{0x12, 0x34, 0x3e, 0xd2, 0x80, 0x70, 0x33, 0xa0}, + } + + valid := IsGUIDValid(validGUID) + assert.True(t, valid, "IsGUIDValid should return true for a valid GUID") +} + +func 
TestIsGUIDValid_False(t *testing.T) { + // Invalid GUID (all zeros) + invalidGUID := windows.GUID{} + + valid := IsGUIDValid(invalidGUID) + assert.False(t, valid, "IsGUIDValid should return false for an invalid GUID") +} diff --git a/x-pack/libbeat/reader/etw/session.go b/x-pack/libbeat/reader/etw/session.go new file mode 100644 index 000000000000..3a8e7be51d7c --- /dev/null +++ b/x-pack/libbeat/reader/etw/session.go @@ -0,0 +1,250 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package etw + +import ( + "errors" + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +// For testing purposes we create a variable to store the function to call +// When running tests, these variables point to a mock function +var ( + guidFromProviderNameFunc = guidFromProviderName + setSessionGUIDFunc = setSessionGUID +) + +type Session struct { + // Name is the identifier for the session. + // It is used to identify the session in logs and also for Windows processes. + Name string + // GUID is the provider GUID to configure the session. + GUID windows.GUID + // properties of the session that are initialized in newSessionProperties() + // See https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-event_trace_properties for more information + properties *EventTraceProperties + // handler of the event tracing session for which the provider is being configured. + // It is obtained from StartTrace when a new trace is started. + // This handler is needed to enable, query or stop the trace. + handler uintptr + // Realtime is a flag to know if the consumer reads from a logfile or real-time session. + Realtime bool // Real-time flag + // NewSession is a flag to indicate whether a new session has been created or attached to an existing one. 
+ NewSession bool + // TraceLevel sets the maximum level of events that we want the provider to write. + traceLevel uint8 + // matchAnyKeyword is a 64-bit bitmask of keywords that determine the categories of events that we want the provider to write. + // The provider writes an event if the event's keyword bits match any of the bits set in this value + // or if the event has no keyword bits set, in addition to meeting the level and matchAllKeyword criteria. + matchAnyKeyword uint64 + // matchAllKeyword is a 64-bit bitmask of keywords that restricts the events that we want the provider to write. + // The provider typically writes an event if the event's keyword bits match all of the bits set in this value + // or if the event has no keyword bits set, in addition to meeting the level and matchAnyKeyword criteria. + matchAllKeyword uint64 + // traceHandler is the trace processing handle. + // It is used to control the trace that receives and processes events. + traceHandler uint64 + // Callback is the pointer to EventRecordCallback which receives and processes event trace events. + Callback func(*EventRecord) uintptr + // BufferCallback is the pointer to BufferCallback which processes retrieved metadata about the ETW buffers (optional). + BufferCallback func(*EventTraceLogfile) uintptr + + // Pointers to functions that make calls to the Windows API. + // In tests, these pointers can be replaced with mock functions to simulate API behavior without making actual calls to the Windows API. 
+ startTrace func(*uintptr, *uint16, *EventTraceProperties) error + controlTrace func(traceHandle uintptr, instanceName *uint16, properties *EventTraceProperties, controlCode uint32) error + enableTrace func(traceHandle uintptr, providerId *windows.GUID, isEnabled uint32, level uint8, matchAnyKeyword uint64, matchAllKeyword uint64, enableProperty uint32, enableParameters *EnableTraceParameters) error + closeTrace func(traceHandle uint64) error + openTrace func(elf *EventTraceLogfile) (uint64, error) + processTrace func(handleArray *uint64, handleCount uint32, startTime *FileTime, endTime *FileTime) error +} + +// setSessionName determines the session name based on the provided configuration. +func setSessionName(conf Config) string { + // Iterate through potential session name values, returning the first non-empty one. + for _, value := range []string{conf.Logfile, conf.Session, conf.SessionName} { + if value != "" { + return value + } + } + + if conf.ProviderName != "" { + return fmt.Sprintf("Elastic-%s", conf.ProviderName) + } + + return fmt.Sprintf("Elastic-%s", conf.ProviderGUID) +} + +// setSessionGUID determines the session GUID based on the provided configuration. +func setSessionGUID(conf Config) (windows.GUID, error) { + var guid windows.GUID + var err error + + // If ProviderGUID is not set in the configuration, attempt to resolve it using the provider name. + if conf.ProviderGUID == "" { + guid, err = guidFromProviderNameFunc(conf.ProviderName) + if err != nil { + return windows.GUID{}, fmt.Errorf("error resolving GUID: %w", err) + } + } else { + // If ProviderGUID is set, parse it into a GUID structure. 
+ guid, err = windows.GUIDFromString(conf.ProviderGUID) + if err != nil { + return windows.GUID{}, fmt.Errorf("error parsing Windows GUID: %w", err) + } + } + + return guid, nil +} + +// getTraceLevel converts a string representation of a trace level +// to its corresponding uint8 constant value +func getTraceLevel(level string) uint8 { + switch level { + case "critical": + return TRACE_LEVEL_CRITICAL + case "error": + return TRACE_LEVEL_ERROR + case "warning": + return TRACE_LEVEL_WARNING + case "information": + return TRACE_LEVEL_INFORMATION + case "verbose": + return TRACE_LEVEL_VERBOSE + default: + return TRACE_LEVEL_INFORMATION + } +} + +// newSessionProperties initializes and returns a pointer to EventTraceProperties +// with the necessary settings for starting an ETW session. +// See https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-event_trace_properties +func newSessionProperties(sessionName string) *EventTraceProperties { + // Calculate buffer size for session properties. + sessionNameSize := (len(sessionName) + 1) * 2 + bufSize := sessionNameSize + int(unsafe.Sizeof(EventTraceProperties{})) + + // Allocate buffer and cast to EventTraceProperties. + propertiesBuf := make([]byte, bufSize) + sessionProperties := (*EventTraceProperties)(unsafe.Pointer(&propertiesBuf[0])) + + // Initialize mandatory fields of the EventTraceProperties struct. 
+ // Filled based on https://learn.microsoft.com/en-us/windows/win32/etw/wnode-header + sessionProperties.Wnode.BufferSize = uint32(bufSize) + sessionProperties.Wnode.Guid = windows.GUID{} // GUID not required for non-private/kernel sessions + // ClientContext is used for timestamp resolution + // Not used unless adding PROCESS_TRACE_MODE_RAW_TIMESTAMP flag to EVENT_TRACE_LOGFILE struct + // See https://learn.microsoft.com/en-us/windows/win32/etw/wnode-header + sessionProperties.Wnode.ClientContext = 1 + sessionProperties.Wnode.Flags = WNODE_FLAG_TRACED_GUID + // Set logging mode to real-time + // See https://learn.microsoft.com/en-us/windows/win32/etw/logging-mode-constants + sessionProperties.LogFileMode = EVENT_TRACE_REAL_TIME_MODE + sessionProperties.LogFileNameOffset = 0 // Can be specified to log to a file as well as to a real-time session + sessionProperties.BufferSize = 64 // Default buffer size, can be configurable + sessionProperties.LoggerNameOffset = uint32(unsafe.Sizeof(EventTraceProperties{})) // Offset to the logger name + + return sessionProperties +} + +// NewSession initializes and returns a new ETW Session based on the provided configuration. +func NewSession(conf Config) (Session, error) { + var session Session + var err error + + // Assign ETW Windows API functions + session.startTrace = _StartTrace + session.controlTrace = _ControlTrace + session.enableTrace = _EnableTraceEx2 + session.openTrace = _OpenTrace + session.processTrace = _ProcessTrace + session.closeTrace = _CloseTrace + + session.Name = setSessionName(conf) + session.Realtime = true + + // If a current session is configured, set up the session properties and return. + if conf.Session != "" { + session.properties = newSessionProperties(session.Name) + return session, nil + } else if conf.Logfile != "" { + // If a logfile is specified, set up for non-realtime session. 
+ session.Realtime = false + return session, nil + } + + session.NewSession = true // Indicate this is a new session + + session.GUID, err = setSessionGUIDFunc(conf) + if err != nil { + return Session{}, err + } + + // Initialize additional session properties. + session.properties = newSessionProperties(session.Name) + session.traceLevel = getTraceLevel(conf.TraceLevel) + session.matchAnyKeyword = conf.MatchAnyKeyword + session.matchAllKeyword = conf.MatchAllKeyword + + return session, nil +} + +// StartConsumer initializes and starts the ETW event tracing session. +func (s *Session) StartConsumer() error { + var elf EventTraceLogfile + var err error + + // Configure EventTraceLogfile based on the session type (realtime or not). + if !s.Realtime { + elf.LogFileMode = PROCESS_TRACE_MODE_EVENT_RECORD + logfilePtr, err := syscall.UTF16PtrFromString(s.Name) + if err != nil { + return fmt.Errorf("failed to convert logfile name: %w", err) + } + elf.LogFileName = logfilePtr + } else { + elf.LogFileMode = PROCESS_TRACE_MODE_EVENT_RECORD | PROCESS_TRACE_MODE_REAL_TIME + sessionPtr, err := syscall.UTF16PtrFromString(s.Name) + if err != nil { + return fmt.Errorf("failed to convert session name: %w", err) + } + elf.LoggerName = sessionPtr + } + + // Set callback and context for the session. + if s.Callback == nil { + return fmt.Errorf("error loading callback") + } + elf.Callback = syscall.NewCallback(s.Callback) + elf.Context = 0 + + // Open an ETW trace processing handle for consuming events + // from an ETW real-time trace session or an ETW log file. + s.traceHandler, err = s.openTrace(&elf) + + switch { + case err == nil: + + // Handle specific errors for trace opening. 
+ case errors.Is(err, ERROR_BAD_PATHNAME): + return fmt.Errorf("invalid log source when opening trace: %w", err) + case errors.Is(err, ERROR_ACCESS_DENIED): + return fmt.Errorf("access denied when opening trace: %w", err) + default: + return fmt.Errorf("failed to open trace: %w", err) + } + // Process the trace. This function blocks until processing ends. + if err := s.processTrace(&s.traceHandler, 1, nil, nil); err != nil { + return fmt.Errorf("failed to process trace: %w", err) + } + + return nil +} diff --git a/x-pack/libbeat/reader/etw/session_test.go b/x-pack/libbeat/reader/etw/session_test.go new file mode 100644 index 000000000000..005b9839d5c6 --- /dev/null +++ b/x-pack/libbeat/reader/etw/session_test.go @@ -0,0 +1,338 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package etw + +import ( + "fmt" + "testing" + "unsafe" + + "github.com/stretchr/testify/assert" + "golang.org/x/sys/windows" +) + +// TestSetSessionName tests the setSessionName function with various configurations. 
+func TestSetSessionName(t *testing.T) { + testCases := []struct { + name string + config Config + expectedName string + }{ + { + name: "ProviderNameSet", + config: Config{ + ProviderName: "Provider1", + }, + expectedName: "Elastic-Provider1", + }, + { + name: "SessionNameSet", + config: Config{ + SessionName: "Session1", + }, + expectedName: "Session1", + }, + { + name: "LogFileSet", + config: Config{ + Logfile: "LogFile1.etl", + }, + expectedName: "LogFile1.etl", + }, + { + name: "FallbackToProviderGUID", + config: Config{ + ProviderGUID: "12345", + }, + expectedName: "Elastic-12345", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + sessionName := setSessionName(tc.config) + assert.Equal(t, tc.expectedName, sessionName, "The session name should be correctly determined") + }) + } +} + +func mockGUIDFromProviderName(providerName string) (windows.GUID, error) { + // Return a mock GUID regardless of the input + return windows.GUID{Data1: 0x12345678, Data2: 0x1234, Data3: 0x5678, Data4: [8]byte{0x9A, 0xBC, 0xDE, 0xF0, 0x12, 0x34, 0x56, 0x78}}, nil +} + +func TestSetSessionGUID_ProviderName(t *testing.T) { + // Defer restoration of original function + t.Cleanup(func() { + guidFromProviderNameFunc = guidFromProviderName + }) + + // Replace with mock function + guidFromProviderNameFunc = mockGUIDFromProviderName + + conf := Config{ProviderName: "Provider1"} + expectedGUID := windows.GUID{Data1: 0x12345678, Data2: 0x1234, Data3: 0x5678, Data4: [8]byte{0x9A, 0xBC, 0xDE, 0xF0, 0x12, 0x34, 0x56, 0x78}} + + guid, err := setSessionGUID(conf) + assert.NoError(t, err) + assert.Equal(t, expectedGUID, guid, "The GUID should match the mock GUID") +} + +func TestSetSessionGUID_ProviderGUID(t *testing.T) { + // Example GUID string + guidString := "{12345678-1234-5678-1234-567812345678}" + + // Configuration with a set ProviderGUID + conf := Config{ProviderGUID: guidString} + + // Expected GUID based on the GUID string + expectedGUID := 
windows.GUID{Data1: 0x12345678, Data2: 0x1234, Data3: 0x5678, Data4: [8]byte{0x12, 0x34, 0x56, 0x78, 0x12, 0x34, 0x56, 0x78}} + + guid, err := setSessionGUID(conf) + + assert.NoError(t, err) + assert.Equal(t, expectedGUID, guid, "The GUID should match the expected value") +} + +func TestGetTraceLevel(t *testing.T) { + testCases := []struct { + name string + level string + expectedCode uint8 + }{ + {"CriticalLevel", "critical", TRACE_LEVEL_CRITICAL}, + {"ErrorLevel", "error", TRACE_LEVEL_ERROR}, + {"WarningLevel", "warning", TRACE_LEVEL_WARNING}, + {"InformationLevel", "information", TRACE_LEVEL_INFORMATION}, + {"VerboseLevel", "verbose", TRACE_LEVEL_VERBOSE}, + {"DefaultLevel", "unknown", TRACE_LEVEL_INFORMATION}, // Default case + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := getTraceLevel(tc.level) + assert.Equal(t, tc.expectedCode, result, "Trace level code should match the expected value") + }) + } +} + +func TestNewSessionProperties(t *testing.T) { + testCases := []struct { + name string + sessionName string + expectedSize uint32 + }{ + {"EmptyName", "", 2 + uint32(unsafe.Sizeof(EventTraceProperties{}))}, + {"NormalName", "Session1", 18 + uint32(unsafe.Sizeof(EventTraceProperties{}))}, + // Additional test cases can be added here + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + props := newSessionProperties(tc.sessionName) + + assert.Equal(t, tc.expectedSize, props.Wnode.BufferSize, "BufferSize should match expected value") + assert.Equal(t, windows.GUID{}, props.Wnode.Guid, "GUID should be empty") + assert.Equal(t, uint32(1), props.Wnode.ClientContext, "ClientContext should be 1") + assert.Equal(t, uint32(WNODE_FLAG_TRACED_GUID), props.Wnode.Flags, "Flags should match WNODE_FLAG_TRACED_GUID") + assert.Equal(t, uint32(EVENT_TRACE_REAL_TIME_MODE), props.LogFileMode, "LogFileMode should be set to real-time") + assert.Equal(t, uint32(0), props.LogFileNameOffset, "LogFileNameOffset should be 
0") + assert.Equal(t, uint32(64), props.BufferSize, "BufferSize should be 64") + assert.Equal(t, uint32(unsafe.Sizeof(EventTraceProperties{})), props.LoggerNameOffset, "LoggerNameOffset should be the size of EventTraceProperties") + }) + } +} + +func TestNewSession_ProviderName(t *testing.T) { + // Defer restoration of original function + t.Cleanup(func() { + setSessionGUIDFunc = setSessionGUID + }) + + // Override setSessionGUIDFunc with mock + setSessionGUIDFunc = func(conf Config) (windows.GUID, error) { + return windows.GUID{ + Data1: 0x12345678, + Data2: 0x1234, + Data3: 0x5678, + Data4: [8]byte{0x9A, 0xBC, 0xDE, 0xF0, 0x12, 0x34, 0x56, 0x78}, + }, nil + } + + expectedGUID := windows.GUID{ + Data1: 0x12345678, + Data2: 0x1234, + Data3: 0x5678, + Data4: [8]byte{0x9A, 0xBC, 0xDE, 0xF0, 0x12, 0x34, 0x56, 0x78}, + } + + conf := Config{ + ProviderName: "Provider1", + SessionName: "Session1", + TraceLevel: "warning", + MatchAnyKeyword: 0xffffffffffffffff, + MatchAllKeyword: 0, + } + session, err := NewSession(conf) + + assert.NoError(t, err) + assert.Equal(t, "Session1", session.Name, "SessionName should match expected value") + assert.Equal(t, expectedGUID, session.GUID, "The GUID in the session should match the expected GUID") + assert.Equal(t, uint8(3), session.traceLevel, "TraceLevel should be 3 (warning)") + assert.Equal(t, true, session.NewSession) + assert.Equal(t, true, session.Realtime) + assert.NotNil(t, session.properties) +} + +func TestNewSession_GUIDError(t *testing.T) { + // Defer restoration of original function + t.Cleanup(func() { + setSessionGUIDFunc = setSessionGUID + }) + + // Override setSessionGUIDFunc with mock + setSessionGUIDFunc = func(conf Config) (windows.GUID, error) { + // Return an empty GUID and an error + return windows.GUID{}, fmt.Errorf("mock error") + } + + conf := Config{ + ProviderName: "Provider1", + SessionName: "Session1", + TraceLevel: "warning", + MatchAnyKeyword: 0xffffffffffffffff, + MatchAllKeyword: 0, + } + session, 
err := NewSession(conf) + + assert.EqualError(t, err, "mock error") + expectedSession := Session{} + assert.Equal(t, expectedSession, session, "Session should be its zero value when an error occurs") + +} + +func TestNewSession_AttachSession(t *testing.T) { + // Test case + conf := Config{ + Session: "Session1", + SessionName: "TestSession", + TraceLevel: "verbose", + MatchAnyKeyword: 0xffffffffffffffff, + MatchAllKeyword: 0, + } + session, err := NewSession(conf) + + assert.NoError(t, err) + assert.Equal(t, "Session1", session.Name, "SessionName should match expected value") + assert.Equal(t, false, session.NewSession) + assert.Equal(t, true, session.Realtime) + assert.NotNil(t, session.properties) +} + +func TestNewSession_Logfile(t *testing.T) { + // Test case + conf := Config{ + Logfile: "LogFile1.etl", + TraceLevel: "verbose", + MatchAnyKeyword: 0xffffffffffffffff, + MatchAllKeyword: 0, + } + session, err := NewSession(conf) + + assert.NoError(t, err) + assert.Equal(t, "LogFile1.etl", session.Name, "SessionName should match expected value") + assert.Equal(t, false, session.NewSession) + assert.Equal(t, false, session.Realtime) + assert.Nil(t, session.properties) +} + +func TestStartConsumer_CallbackNull(t *testing.T) { + // Create a Session instance + session := &Session{ + Name: "TestSession", + Realtime: false, + BufferCallback: nil, + Callback: nil, + } + + err := session.StartConsumer() + assert.EqualError(t, err, "error loading callback") +} + +func TestStartConsumer_OpenTraceError(t *testing.T) { + // Mock implementation of openTrace + openTrace := func(elf *EventTraceLogfile) (uint64, error) { + return 0, ERROR_ACCESS_DENIED // Mock a valid session handler + } + + // Create a Session instance + session := &Session{ + Name: "TestSession", + Realtime: false, + BufferCallback: nil, + Callback: func(*EventRecord) uintptr { + return 1 + }, + openTrace: openTrace, + } + + err := session.StartConsumer() + assert.EqualError(t, err, "access denied when opening 
trace: Access is denied.") +} + +func TestStartConsumer_ProcessTraceError(t *testing.T) { + // Mock implementations + openTrace := func(elf *EventTraceLogfile) (uint64, error) { + return 12345, nil // Mock a valid session handler + } + + processTrace := func(handleArray *uint64, handleCount uint32, startTime *FileTime, endTime *FileTime) error { + return ERROR_INVALID_PARAMETER + } + + // Create a Session instance + session := &Session{ + Name: "TestSession", + Realtime: true, + BufferCallback: nil, + Callback: func(*EventRecord) uintptr { + return 1 + }, + openTrace: openTrace, + processTrace: processTrace, + } + + err := session.StartConsumer() + assert.EqualError(t, err, "failed to process trace: The parameter is incorrect.") +} + +func TestStartConsumer_Success(t *testing.T) { + // Mock implementations + openTrace := func(elf *EventTraceLogfile) (uint64, error) { + return 12345, nil // Mock a valid session handler + } + + processTrace := func(handleArray *uint64, handleCount uint32, startTime *FileTime, endTime *FileTime) error { + return nil + } + + // Create a Session instance + session := &Session{ + Name: "TestSession", + Realtime: true, + BufferCallback: nil, + Callback: func(*EventRecord) uintptr { + return 1 + }, + openTrace: openTrace, + processTrace: processTrace, + } + + err := session.StartConsumer() + assert.NoError(t, err) + assert.Equal(t, uint64(12345), session.traceHandler, "traceHandler should be set to the mock value") +} diff --git a/x-pack/libbeat/reader/etw/syscall_advapi32.go b/x-pack/libbeat/reader/etw/syscall_advapi32.go new file mode 100644 index 000000000000..fe44b0022a46 --- /dev/null +++ b/x-pack/libbeat/reader/etw/syscall_advapi32.go @@ -0,0 +1,318 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build windows + +package etw + +import ( + "errors" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + advapi32 = windows.NewLazySystemDLL("advapi32.dll") + // Controller + startTraceW = advapi32.NewProc("StartTraceW") + enableTraceEx2 = advapi32.NewProc("EnableTraceEx2") // Manifest-based providers and filtering + controlTraceW = advapi32.NewProc("ControlTraceW") + // Consumer + openTraceW = advapi32.NewProc("OpenTraceW") + processTrace = advapi32.NewProc("ProcessTrace") + closeTrace = advapi32.NewProc("CloseTrace") +) + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-event_trace +type EventTrace struct { + Header EventTraceHeader + InstanceId uint32 + ParentInstanceId uint32 + ParentGuid windows.GUID + MofData uintptr + MofLength uint32 + UnionCtx uint32 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-event_trace_header +type EventTraceHeader struct { + Size uint16 + Union1 uint16 + Union2 uint32 + ThreadId uint32 + ProcessId uint32 + TimeStamp int64 + Union3 [16]byte + Union4 uint64 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-event_trace_properties +type EventTraceProperties struct { + Wnode WnodeHeader + BufferSize uint32 + MinimumBuffers uint32 + MaximumBuffers uint32 + MaximumFileSize uint32 + LogFileMode uint32 + FlushTimer uint32 + EnableFlags uint32 + AgeLimit int32 + NumberOfBuffers uint32 + FreeBuffers uint32 + EventsLost uint32 + BuffersWritten uint32 + LogBuffersLost uint32 + RealTimeBuffersLost uint32 + LoggerThreadId syscall.Handle + LogFileNameOffset uint32 + LoggerNameOffset uint32 +} + +// https://learn.microsoft.com/en-us/windows/win32/etw/wnode-header +type WnodeHeader struct { + BufferSize uint32 + ProviderId uint32 + Union1 uint64 + Union2 int64 + Guid windows.GUID + ClientContext uint32 + Flags uint32 +} + +// Used to enable a provider via EnableTraceEx2 +// 
https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-enable_trace_parameters +type EnableTraceParameters struct { + Version uint32 + EnableProperty uint32 + ControlFlags uint32 + SourceId windows.GUID + EnableFilterDesc *EventFilterDescriptor + FilterDescrCount uint32 +} + +// Defines the filter data that a session passes +// to the provider's enable callback function +// https://learn.microsoft.com/en-us/windows/win32/api/evntprov/ns-evntprov-event_filter_descriptor +type EventFilterDescriptor struct { + Ptr uint64 + Size uint32 + Type uint32 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-event_trace_logfilew +type EventTraceLogfile struct { + LogFileName *uint16 // Logfile + LoggerName *uint16 // Real-time session + CurrentTime int64 + BuffersRead uint32 + LogFileMode uint32 + CurrentEvent EventTrace + LogfileHeader TraceLogfileHeader + BufferCallback uintptr + BufferSize uint32 + Filled uint32 + EventsLost uint32 + // Receive events (EventRecordCallback (TDH) or EventCallback) + // Tip: New code should use EventRecordCallback instead of EventCallback. 
+ // The EventRecordCallback receives an EVENT_RECORD which contains + // more complete event information + Callback uintptr + IsKernelTrace uint32 + Context uintptr +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-trace_logfile_header +type TraceLogfileHeader struct { + BufferSize uint32 + VersionUnion uint32 + ProviderVersion uint32 + NumberOfProcessors uint32 + EndTime int64 + TimerResolution uint32 + MaximumFileSize uint32 + LogFileMode uint32 + BuffersWritten uint32 + Union1 [16]byte + LoggerName *uint16 + LogFileName *uint16 + TimeZone windows.Timezoneinformation + BootTime int64 + PerfFreq int64 + StartTime int64 + ReservedFlags uint32 + BuffersLost uint32 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-filetime +type FileTime struct { + dwLowDateTime uint32 + dwHighDateTime uint32 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-systemtime +type SystemTime struct { + Year uint16 + Month uint16 + DayOfWeek uint16 + Day uint16 + Hour uint16 + Minute uint16 + Second uint16 + Milliseconds uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-enabletrace +const ( + TRACE_LEVEL_NONE = 0 + TRACE_LEVEL_CRITICAL = 1 + TRACE_LEVEL_FATAL = 1 + TRACE_LEVEL_ERROR = 2 + TRACE_LEVEL_WARNING = 3 + TRACE_LEVEL_INFORMATION = 4 + TRACE_LEVEL_VERBOSE = 5 +) + +// https://learn.microsoft.com/en-us/windows/win32/api/evntprov/nc-evntprov-penablecallback +const ( + EVENT_CONTROL_CODE_DISABLE_PROVIDER = 0 + EVENT_CONTROL_CODE_ENABLE_PROVIDER = 1 + EVENT_CONTROL_CODE_CAPTURE_STATE = 2 +) + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-controltracea +const ( + EVENT_TRACE_CONTROL_QUERY = 0 + EVENT_TRACE_CONTROL_STOP = 1 + EVENT_TRACE_CONTROL_UPDATE = 2 + EVENT_TRACE_CONTROL_FLUSH = 3 +) + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/ns-evntrace-event_trace_logfilea +const ( + 
PROCESS_TRACE_MODE_REAL_TIME = 0x00000100 + PROCESS_TRACE_MODE_RAW_TIMESTAMP = 0x00001000 + PROCESS_TRACE_MODE_EVENT_RECORD = 0x10000000 +) + +const INVALID_PROCESSTRACE_HANDLE = 0xFFFFFFFFFFFFFFFF + +// https://learn.microsoft.com/en-us/windows/win32/debug/system-error-codes +const ( + ERROR_ACCESS_DENIED syscall.Errno = 5 + ERROR_INVALID_HANDLE syscall.Errno = 6 + ERROR_BAD_LENGTH syscall.Errno = 24 + ERROR_INVALID_PARAMETER syscall.Errno = 87 + ERROR_INSUFFICIENT_BUFFER syscall.Errno = 122 + ERROR_BAD_PATHNAME syscall.Errno = 161 + ERROR_ALREADY_EXISTS syscall.Errno = 183 + ERROR_NOT_FOUND syscall.Errno = 1168 + ERROR_NO_SYSTEM_RESOURCES syscall.Errno = 1450 + ERROR_TIMEOUT syscall.Errno = 1460 + ERROR_WMI_INSTANCE_NOT_FOUND syscall.Errno = 4201 + ERROR_CTX_CLOSE_PENDING syscall.Errno = 7007 + ERROR_EVT_INVALID_EVENT_DATA syscall.Errno = 15005 +) + +// https://learn.microsoft.com/en-us/windows/win32/etw/logging-mode-constants (to extend modes) +// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/wmistr/ns-wmistr-_wnode_header (to extend flags) +const ( + WNODE_FLAG_ALL_DATA = 0x00000001 + WNODE_FLAG_TRACED_GUID = 0x00020000 + EVENT_TRACE_REAL_TIME_MODE = 0x00000100 +) + +// Wrappers + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-starttracew +func _StartTrace(traceHandle *uintptr, + instanceName *uint16, + properties *EventTraceProperties) error { + r0, _, _ := startTraceW.Call( + uintptr(unsafe.Pointer(traceHandle)), + uintptr(unsafe.Pointer(instanceName)), + uintptr(unsafe.Pointer(properties))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-enabletraceex2 +func _EnableTraceEx2(traceHandle uintptr, + providerId *windows.GUID, + isEnabled uint32, + level uint8, + matchAnyKeyword uint64, + matchAllKeyword uint64, + enableProperty uint32, + enableParameters *EnableTraceParameters) error { + r0, _, _ := enableTraceEx2.Call( + 
traceHandle, + uintptr(unsafe.Pointer(providerId)), + uintptr(isEnabled), + uintptr(level), + uintptr(matchAnyKeyword), + uintptr(matchAllKeyword), + uintptr(enableProperty), + uintptr(unsafe.Pointer(enableParameters))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-controltracew +func _ControlTrace(traceHandle uintptr, + instanceName *uint16, + properties *EventTraceProperties, + controlCode uint32) error { + r0, _, _ := controlTraceW.Call( + traceHandle, + uintptr(unsafe.Pointer(instanceName)), + uintptr(unsafe.Pointer(properties)), + uintptr(controlCode)) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-opentracew +func _OpenTrace(logfile *EventTraceLogfile) (uint64, error) { + r0, _, err := openTraceW.Call( + uintptr(unsafe.Pointer(logfile))) + var errno syscall.Errno + if errors.As(err, &errno) && errno == 0 { + return uint64(r0), nil + } + return uint64(r0), err +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-processtrace +func _ProcessTrace(handleArray *uint64, + handleCount uint32, + startTime *FileTime, + endTime *FileTime) error { + r0, _, _ := processTrace.Call( + uintptr(unsafe.Pointer(handleArray)), + uintptr(handleCount), + uintptr(unsafe.Pointer(startTime)), + uintptr(unsafe.Pointer(endTime))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntrace/nf-evntrace-closetrace +func _CloseTrace(traceHandle uint64) error { + r0, _, _ := closeTrace.Call( + uintptr(traceHandle)) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} diff --git a/x-pack/libbeat/reader/etw/syscall_tdh.go b/x-pack/libbeat/reader/etw/syscall_tdh.go new file mode 100644 index 000000000000..73551ee123e2 --- /dev/null +++ b/x-pack/libbeat/reader/etw/syscall_tdh.go @@ -0,0 +1,323 @@ +// 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package etw + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + tdh = windows.NewLazySystemDLL("tdh.dll") + tdhEnumerateProviders = tdh.NewProc("TdhEnumerateProviders") + tdhGetEventInformation = tdh.NewProc("TdhGetEventInformation") + tdhGetEventMapInformation = tdh.NewProc("TdhGetEventMapInformation") + tdhFormatProperty = tdh.NewProc("TdhFormatProperty") + tdhGetProperty = tdh.NewProc("TdhGetProperty") +) + +const anysizeArray = 1 +const DEFAULT_PROPERTY_BUFFER_SIZE = 256 + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/ns-tdh-provider_enumeration_info +type ProviderEnumerationInfo struct { + NumberOfProviders uint32 + Reserved uint32 + TraceProviderInfoArray [anysizeArray]TraceProviderInfo +} + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/ns-tdh-trace_provider_info +type TraceProviderInfo struct { + ProviderGuid windows.GUID + SchemaSource uint32 + ProviderNameOffset uint32 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntcons/ns-evntcons-event_record +type EventRecord struct { + EventHeader EventHeader + BufferContext EtwBufferContext + ExtendedDataCount uint16 + UserDataLength uint16 + ExtendedData *EventHeaderExtendedDataItem + UserData uintptr // Event data + UserContext uintptr +} + +// https://learn.microsoft.com/en-us/windows/win32/api/relogger/ns-relogger-event_header +const ( + EVENT_HEADER_FLAG_STRING_ONLY = 0x0004 + EVENT_HEADER_FLAG_32_BIT_HEADER = 0x0020 + EVENT_HEADER_FLAG_64_BIT_HEADER = 0x0040 +) + +// https://learn.microsoft.com/en-us/windows/win32/api/relogger/ns-relogger-event_header +type EventHeader struct { + Size uint16 + HeaderType uint16 + Flags uint16 + EventProperty uint16 + ThreadId uint32 + ProcessId uint32 
+ TimeStamp int64 + ProviderId windows.GUID + EventDescriptor EventDescriptor + Time int64 + ActivityId windows.GUID +} + +func (e *EventRecord) pointerSize() uint32 { + if e.EventHeader.Flags&EVENT_HEADER_FLAG_32_BIT_HEADER == EVENT_HEADER_FLAG_32_BIT_HEADER { + return 4 + } + return 8 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntprov/ns-evntprov-event_descriptor +type EventDescriptor struct { + Id uint16 + Version uint8 + Channel uint8 + Level uint8 + Opcode uint8 + Task uint16 + Keyword uint64 +} + +// https://learn.microsoft.com/en-us/windows/desktop/api/relogger/ns-relogger-etw_buffer_context +type EtwBufferContext struct { + Union uint16 + LoggerId uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/evntcons/ns-evntcons-event_header_extended_data_item +type EventHeaderExtendedDataItem struct { + Reserved1 uint16 + ExtType uint16 + InternalStruct uint16 + DataSize uint16 + DataPtr uint64 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/ns-tdh-tdh_context +type TdhContext struct { + ParameterValue uint32 + ParameterType int32 + ParameterSize uint32 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/ns-tdh-trace_event_info +type TraceEventInfo struct { + ProviderGUID windows.GUID + EventGUID windows.GUID + EventDescriptor EventDescriptor + DecodingSource DecodingSource + ProviderNameOffset uint32 + LevelNameOffset uint32 + ChannelNameOffset uint32 + KeywordsNameOffset uint32 + TaskNameOffset uint32 + OpcodeNameOffset uint32 + EventMessageOffset uint32 + ProviderMessageOffset uint32 + BinaryXMLOffset uint32 + BinaryXMLSize uint32 + ActivityIDNameOffset uint32 + RelatedActivityIDNameOffset uint32 + PropertyCount uint32 + TopLevelPropertyCount uint32 + Flags TemplateFlags + EventPropertyInfoArray [anysizeArray]EventPropertyInfo +} + +// https://learn.microsoft.com/en-us/windows/desktop/api/tdh/ns-tdh-event_property_info +type EventPropertyInfo struct { + Flags PropertyFlags + NameOffset uint32 + 
TypeUnion struct { + u1 uint16 + u2 uint16 + u3 uint32 + } + CountUnion uint16 + LengthUnion uint16 + ResTagUnion uint32 +} + +func (i *EventPropertyInfo) count() uint16 { + return i.CountUnion +} + +func (i *EventPropertyInfo) length() uint16 { + return i.LengthUnion +} + +func (i *EventPropertyInfo) inType() uint16 { + return i.TypeUnion.u1 +} + +func (i *EventPropertyInfo) outType() uint16 { + return i.TypeUnion.u2 +} + +func (i *EventPropertyInfo) structStartIndex() uint16 { + return i.inType() +} + +func (i *EventPropertyInfo) numOfStructMembers() uint16 { + return i.outType() +} + +func (i *EventPropertyInfo) mapNameOffset() uint32 { + return i.TypeUnion.u3 +} + +const ( + TdhIntypeBinary = 14 + TdhOuttypeIpv6 = 24 +) + +type DecodingSource int32 +type TemplateFlags int32 + +type PropertyFlags int32 + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/ne-tdh-property_flags +const ( + PropertyStruct = PropertyFlags(0x1) + PropertyParamLength = PropertyFlags(0x2) + PropertyParamCount = PropertyFlags(0x4) +) + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/ns-tdh-event_map_info +type EventMapInfo struct { + NameOffset uint32 + Flag MapFlags + EntryCount uint32 + Union uint32 + MapEntryArray [anysizeArray]EventMapEntry +} + +type MapFlags int32 + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/ns-tdh-event_map_entry +type EventMapEntry struct { + OutputOffset uint32 + Union uint32 +} + +// https://learn.microsoft.com/en-us/windows/desktop/api/tdh/ns-tdh-property_data_descriptor +type PropertyDataDescriptor struct { + PropertyName unsafe.Pointer + ArrayIndex uint32 + Reserved uint32 +} + +// enumerateProvidersFunc is used to replace the pointer to the function in unit tests +var enumerateProvidersFunc = _TdhEnumerateProviders + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/nf-tdh-tdhenumerateproviders +func _TdhEnumerateProviders( + pBuffer *ProviderEnumerationInfo, + pBufferSize *uint32) error { + r0, _, _ := 
tdhEnumerateProviders.Call( + uintptr(unsafe.Pointer(pBuffer)), + uintptr(unsafe.Pointer(pBufferSize))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/nf-tdh-tdhgeteventinformation +func _TdhGetEventInformation(pEvent *EventRecord, + tdhContextCount uint32, + pTdhContext *TdhContext, + pBuffer *TraceEventInfo, + pBufferSize *uint32) error { + r0, _, _ := tdhGetEventInformation.Call( + uintptr(unsafe.Pointer(pEvent)), + uintptr(tdhContextCount), + uintptr(unsafe.Pointer(pTdhContext)), + uintptr(unsafe.Pointer(pBuffer)), + uintptr(unsafe.Pointer(pBufferSize))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/nf-tdh-tdhformatproperty +func _TdhFormatProperty( + eventInfo *TraceEventInfo, + mapInfo *EventMapInfo, + pointerSize uint32, + propertyInType uint16, + propertyOutType uint16, + propertyLength uint16, + userDataLength uint16, + userData *byte, + bufferSize *uint32, + buffer *uint8, + userDataConsumed *uint16) error { + r0, _, _ := tdhFormatProperty.Call( + uintptr(unsafe.Pointer(eventInfo)), + uintptr(unsafe.Pointer(mapInfo)), + uintptr(pointerSize), + uintptr(propertyInType), + uintptr(propertyOutType), + uintptr(propertyLength), + uintptr(userDataLength), + uintptr(unsafe.Pointer(userData)), + uintptr(unsafe.Pointer(bufferSize)), + uintptr(unsafe.Pointer(buffer)), + uintptr(unsafe.Pointer(userDataConsumed))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/nf-tdh-tdhgetproperty +func _TdhGetProperty(pEvent *EventRecord, + tdhContextCount uint32, + pTdhContext *TdhContext, + propertyDataCount uint32, + pPropertyData *PropertyDataDescriptor, + bufferSize uint32, + pBuffer *byte) error { + r0, _, _ := tdhGetProperty.Call( + uintptr(unsafe.Pointer(pEvent)), + uintptr(tdhContextCount), + uintptr(unsafe.Pointer(pTdhContext)), + 
uintptr(propertyDataCount), + uintptr(unsafe.Pointer(pPropertyData)), + uintptr(bufferSize), + uintptr(unsafe.Pointer(pBuffer))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} + +// https://learn.microsoft.com/en-us/windows/win32/api/tdh/nf-tdh-tdhgeteventmapinformation +func _TdhGetEventMapInformation(pEvent *EventRecord, + pMapName *uint16, + pBuffer *EventMapInfo, + pBufferSize *uint32) error { + r0, _, _ := tdhGetEventMapInformation.Call( + uintptr(unsafe.Pointer(pEvent)), + uintptr(unsafe.Pointer(pMapName)), + uintptr(unsafe.Pointer(pBuffer)), + uintptr(unsafe.Pointer(pBufferSize))) + if r0 == 0 { + return nil + } + return syscall.Errno(r0) +} From 7764521270e6f7638196b210d8bb4523db6ae1f8 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 7 Feb 2024 21:43:32 -0500 Subject: [PATCH 128/129] Add MSI installer change to 8.12 release notes. (#37915) (#37916) (cherry picked from commit 2414d9579af907d3ea4dcc99e59ab9c6428d8324) Co-authored-by: Craig MacKenzie --- CHANGELOG.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index cb5aa3b6354b..8b8e799f8af1 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -74,6 +74,7 @@ If you are not using the Elasticsearch output, set `queue.mem.flush.timeout: 1s` *Heartbeat* - Decrease the ES default timeout to 10 for the load monitor state requests. +- Windows MSI installers now store configuration in C:\Program Files instead of C:\ProgramData. 
https://github.com/elastic/elastic-stack-installers/pull/209 *Osquerybeat* From b7fc69a3ee3211300be9432a622a2ff0d26f45ff Mon Sep 17 00:00:00 2001 From: Chris Berkhout Date: Thu, 8 Feb 2024 10:08:15 +0100 Subject: [PATCH 129/129] [filebeat][threatintel] MISP pagination fixes (#37898) Update the HTTP JSON input configuration for the Threat Intel module's misp fileset with pagination fixes that were done earlier in the Agent-based MISP integration, in these PRs: - Fix timestamp format sent to API https://github.com/elastic/integrations/pull/6482 - Fix duplicate requests for page 1 https://github.com/elastic/integrations/pull/6495 - Keep the same timestamp for later pages https://github.com/elastic/integrations/pull/6649 - Pagination fixes https://github.com/elastic/integrations/pull/9073 --- CHANGELOG.next.asciidoc | 1 + .../module/threatintel/misp/config/config.yml | 25 ++++++++++++++++--- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 8281f7b79ecb..5c9d49a5e1c5 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -89,6 +89,7 @@ fields added to events containing the Beats version. {pull}37553[37553] - Fix m365_defender cursor value and query building. {pull}37116[37116] - Fix TCP/UDP metric queue length parsing base. {pull}37714[37714] - Update github.com/lestrrat-go/jwx dependency. 
{pull}37799[37799] +- [threatintel] MISP pagination fixes {pull}37898[37898] *Heartbeat* diff --git a/x-pack/filebeat/module/threatintel/misp/config/config.yml b/x-pack/filebeat/module/threatintel/misp/config/config.yml index 3bd5aac30ec8..9ad66efcf545 100644 --- a/x-pack/filebeat/module/threatintel/misp/config/config.yml +++ b/x-pack/filebeat/module/threatintel/misp/config/config.yml @@ -32,8 +32,20 @@ request.transforms: value: json - set: target: body.timestamp - value: '[[.cursor.timestamp]]' - default: '[[ formatDate (now (parseDuration "-{{ .first_interval }}")) "UnixDate" ]]' + value: >- + [[- if index .cursor "timestamp" -]] + [[- .cursor.timestamp -]] + [[- else -]] + [[- .last_response.url.params.Get "timestamp" -]] + [[- end -]] + default: '[[ (now (parseDuration "-{{ .first_interval }}")).Unix ]]' +- set: + target: body.order + value: timestamp +- set: + # Ignored by MISP, set as a workaround to make it available in response.pagination. + target: url.params.timestamp + value: '[[.body.timestamp]]' response.split: target: body.response @@ -51,8 +63,15 @@ response.request_body_on_pagination: true response.pagination: - set: target: body.page - value: '[[if (ne (len .last_response.body.response) 0)]][[add .last_response.page 1]][[end]]' + # Add 2 because the httpjson page counter is zero-based while the MISP page parameter starts at 1. + value: '[[if (ne (len .last_response.body.response) 0)]][[add .last_response.page 2]][[end]]' fail_on_template_error: true +- set: + target: body.timestamp + value: '[[.last_response.url.params.Get "timestamp"]]' +- set: + target: url.params.timestamp + value: '[[.last_response.url.params.Get "timestamp"]]' cursor: timestamp: value: '[[.last_event.Event.timestamp]]'