diff --git a/.ci/bump-golang-7.17.yml b/.ci/bump-golang-7.17.yml new file mode 100644 index 00000000000..c447bca878e --- /dev/null +++ b/.ci/bump-golang-7.17.yml @@ -0,0 +1,144 @@ +--- +name: Bump golang-version to latest version in 7.17 +pipelineid: "bump-golang-version-7.17" + +scms: + githubConfig: + kind: github + spec: + user: '{{ requiredEnv "GIT_USER" }}' + email: '{{ requiredEnv "GIT_EMAIL" }}' + owner: elastic + repository: beats + token: '{{ requiredEnv "GITHUB_TOKEN" }}' + username: '{{ requiredEnv "GIT_USER" }}' + branch: 7.17 + +actions: + beats: + kind: github/pullrequest + scmid: githubConfig + sourceid: latestGoVersion + title: '[Automation][7.17] Bump Golang version to {{ source "latestGoVersion" }}' + spec: + automerge: false + labels: + - dependencies + - backport-skip + description: | + Generated automatically with {{ requiredEnv "JOB_URL" }} + +sources: + minor: + name: Get minor version in .go-version + kind: shell + transformers: + - findsubmatch: + pattern: '^\d+.(\d+).\d+$' + captureindex: 1 + spec: + command: cat .go-version + + latestGoVersion: + name: Get Latest Go Release + kind: githubrelease + dependson: + - minor + transformers: + - trimprefix: v + spec: + owner: elastic + repository: golang-crossbuild + token: '{{ requiredEnv "GITHUB_TOKEN" }}' + username: '{{ requiredEnv "GIT_USER" }}' + versionfilter: + kind: regex + pattern: v1\.{{ source "minor" }}\.(\d*)$ + + gomod: + dependson: + - latestGoVersion + name: Get version in go.mod format + kind: shell + transformers: + - findsubmatch: + pattern: '^(\d+.\d+).\d+' + captureindex: 1 + spec: + command: echo {{ source "latestGoVersion" }} + +conditions: + dockerTag: + name: Is docker image golang:{{ source "latestGoVersion" }} published + kind: dockerimage + spec: + image: golang + tag: '{{ source "latestGoVersion" }}' + sourceid: latestGoVersion + + goDefaultVersion-check: + name: Check if defined golang version differs + kind: shell + sourceid: latestGoVersion + spec: + command: 'grep -v -q {{ source "latestGoVersion" }} .go-version #' + +targets: + update-go-version: + name: "Update .go-version" + sourceid: latestGoVersion + scmid: githubConfig + kind: file + spec: + content: '{{ source "latestGoVersion" }}' + file: .go-version + matchpattern: '\d+.\d+.\d+' + update-golang.ci: + name: "Update .golangci.yml" + sourceid: latestGoVersion + scmid: githubConfig + kind: file + spec: + content: '{{ source "latestGoVersion" }}' + file: .golangci.yml + matchpattern: '\d+.\d+.\d+' + update-version.asciidoc: + name: "Update version.asciidoc" + sourceid: latestGoVersion + scmid: githubConfig + kind: file + spec: + content: ':go-version: {{ source "latestGoVersion" }}' + file: libbeat/docs/version.asciidoc + matchpattern: ':go-version: \d+.\d+.\d+' + update-dockerfiles: + name: "Update from dockerfiles" + sourceid: latestGoVersion + scmid: githubConfig + kind: file + spec: + content: 'FROM golang:{{ source "latestGoVersion" }}' + # This list differs from the main branch, this is the main reason we have a separate job + files: + - ./metricbeat/Dockerfile + - ./metricbeat/module/vsphere/_meta/Dockerfile + - ./metricbeat/module/nats/_meta/Dockerfile + - ./metricbeat/module/http/_meta/Dockerfile + - ./filebeat/Dockerfile + - ./auditbeat/Dockerfile + - ./heartbeat/Dockerfile + - ./packetbeat/Dockerfile + - ./libbeat/Dockerfile + - ./x-pack/metricbeat/module/stan/_meta/Dockerfile + - ./x-pack/functionbeat/Dockerfile + - ./x-pack/libbeat/Dockerfile + matchpattern: 'FROM golang:\d+.\d+.\d+' + update-gomod: + name: "Update 
go.mod" + sourceid: gomod + scmid: githubConfig + kind: file + spec: + content: 'go {{ source "gomod" }}' + file: go.mod + matchpattern: 'go \d+.\d+' diff --git a/.ci/bump-golang.yml b/.ci/bump-golang.yml index c4e33ee231c..668e1137363 100644 --- a/.ci/bump-golang.yml +++ b/.ci/bump-golang.yml @@ -124,6 +124,13 @@ targets: - ./metricbeat/Dockerfile - ./packetbeat/Dockerfile - ./x-pack/functionbeat/Dockerfile + - ./metricbeat/module/nats/_meta/Dockerfile + - ./metricbeat/module/http/_meta/Dockerfile + - ./metricbeat/module/vsphere/_meta/Dockerfile + - ./dev-tools/kubernetes/metricbeat/Dockerfile.debug + - ./dev-tools/kubernetes/filebeat/Dockerfile.debug + - ./dev-tools/kubernetes/heartbeat/Dockerfile.debug + - ./x-pack/metricbeat/module/stan/_meta/Dockerfile matchpattern: 'FROM golang:\d+.\d+.\d+' update-gomod: name: "Update go.mod" diff --git a/.ci/jobs/docker-compose.yml b/.ci/jobs/docker-compose.yml new file mode 100644 index 00000000000..e9fc43ff704 --- /dev/null +++ b/.ci/jobs/docker-compose.yml @@ -0,0 +1,23 @@ +version: '2.3' +services: + # This is a proxy used to block beats until all services are healthy. + # See: https://github.com/docker/compose/issues/4369 + proxy_dep: + image: busybox + depends_on: + localstack: { condition: service_healthy } + + localstack: + container_name: "${localstack_integration_test_container}" + image: localstack/localstack:2.1.0 # Latest stable release + ports: + - "127.0.0.1:4566:4566" # LocalStack Gateway + environment: + - DEBUG=1 + - DOCKER_HOST=unix:///var/run/docker.sock + - LOCALSTACK_HOST=localhost + - S3_HOSTNAME=localhost + - PROVIDER_OVERRIDE_S3=asf + volumes: + - "${LOCALSTACK_VOLUME_DIR:-./volume}:/var/lib/localstack" + - "/var/run/docker.sock:/var/run/docker.sock" diff --git a/.ci/scripts/docker-services-cleanup.sh b/.ci/scripts/docker-services-cleanup.sh new file mode 100755 index 00000000000..cc182413a2e --- /dev/null +++ b/.ci/scripts/docker-services-cleanup.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -exuo pipefail + +${HOME}/bin/docker-compose -f .ci/jobs/docker-compose.yml down -v + +exit $? diff --git a/.ci/scripts/install-docker-services.sh b/.ci/scripts/install-docker-services.sh new file mode 100755 index 00000000000..420362f8355 --- /dev/null +++ b/.ci/scripts/install-docker-services.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -exuo pipefail + +${HOME}/bin/docker-compose -f .ci/jobs/docker-compose.yml up -d + +exit $? diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 13edaaaf091..2ee0bafb361 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -155,6 +155,7 @@ CHANGELOG* /x-pack/filebeat/module/proofpoint @elastic/security-external-integrations /x-pack/filebeat/module/rabbitmq @elastic/obs-infraobs-integrations /x-pack/filebeat/module/radware @elastic/security-external-integrations +/x-pack/filebeat/module/salesforce @elastic/obs-infraobs-integrations /x-pack/filebeat/module/snort @elastic/security-external-integrations /x-pack/filebeat/module/snyk @elastic/security-external-integrations /x-pack/filebeat/module/sonicwall @elastic/security-external-integrations diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index ef4ecda8836..93e40cb3be6 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -34,6 +34,7 @@ List here all the items you have verified BEFORE sending this PR. 
Please DO NOT - [ ] I have made corresponding change to the default configuration files - [ ] I have added tests that prove my fix is effective or that my feature works - [ ] I have added an entry in `CHANGELOG.next.asciidoc` or `CHANGELOG-developer.next.asciidoc`. +- [ ] I have made my commit title and message explanatory about the purpose and the reason of the change ## Author's Checklist diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 0238179f29c..fe78effef90 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -41,11 +41,13 @@ updates: - dependabot - Team:Security-External Integrations allow: - # Skip github.com/elastic/mito because it requires documentation updates. - dependency-name: github.com/elastic/go-libaudit/* - dependency-name: github.com/elastic/go-perf - dependency-name: github.com/elastic/go-seccomp-bpf - dependency-name: github.com/elastic/toutoumomoma + ignore: + # Skip github.com/elastic/mito because it requires documentation updates. + - dependency-name: github.com/elastic/mito reviewers: - elastic/security-external-integrations open-pull-requests-limit: 2 diff --git a/.github/workflows/bump-golang.yml b/.github/workflows/bump-golang.yml index edc295724c2..f0cdb0607f9 100644 --- a/.github/workflows/bump-golang.yml +++ b/.github/workflows/bump-golang.yml @@ -2,8 +2,9 @@ name: bump-golang on: + workflow_dispatch: schedule: - - cron: '0 20 * * 6' + - cron: "0 20 * * 6" permissions: contents: read @@ -15,7 +16,6 @@ jobs: bump: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - uses: elastic/apm-pipeline-library/.github/actions/updatecli@current @@ -24,3 +24,10 @@ jobs: vaultRoleId: ${{ secrets.VAULT_ROLE_ID }} vaultSecretId: ${{ secrets.VAULT_SECRET_ID }} pipeline: ./.ci/bump-golang.yml + + - uses: elastic/apm-pipeline-library/.github/actions/updatecli@current + with: + vaultUrl: ${{ secrets.VAULT_ADDR }} + vaultRoleId: ${{ secrets.VAULT_ROLE_ID }} + vaultSecretId: ${{ secrets.VAULT_SECRET_ID }} + pipeline: ./.ci/bump-golang-7.17.yml diff --git a/.go-version b/.go-version index 88ebadf2c32..e54f3135a7d 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.19.10 +1.19.12 diff --git a/.golangci.yml b/.golangci.yml index 2cd359352da..2e6e8ba290f 100755 --- a/.golangci.yml +++ b/.golangci.yml @@ -62,6 +62,7 @@ linters: - unconvert # Remove unnecessary type conversions - wastedassign # wastedassign finds wasted assignment statements. - gomodguard # check for blocked dependencies + - prealloc # Finds slice declarations that could potentially be pre-allocated # all available settings of specific linters linters-settings: @@ -108,7 +109,7 @@ linters-settings: gosimple: # Select the Go version to target. The default is '1.13'. - go: "1.19.10" + go: "1.19.12" nakedret: # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 @@ -126,19 +127,19 @@ linters-settings: staticcheck: # Select the Go version to target. The default is '1.13'. - go: "1.19.10" + go: "1.19.12" checks: ["all"] stylecheck: # Select the Go version to target. The default is '1.13'. - go: "1.19.10" + go: "1.19.12" # Disabled: # ST1005: error strings should not be capitalized checks: ["all", "-ST1005"] unused: # Select the Go version to target. The default is '1.13'. - go: "1.19.10" + go: "1.19.12" gosec: excludes: @@ -147,3 +148,17 @@ linters-settings: - G401 # Detect the usage of DES, RC4, MD5 or SHA1: Used in non-crypto contexts. - G501 # Import blocklist: crypto/md5: Used in non-crypto contexts. 
- G505 # Import blocklist: crypto/sha1: Used in non-crypto contexts. + + prealloc: + # IMPORTANT: we don't recommend using this linter before doing performance profiling. + # For most programs usage of prealloc will be a premature optimization. + + # Report pre-allocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. + # Default: true + simple: false + # Report pre-allocation suggestions on range loops. + # Default: true + range-loops: true + # Report pre-allocation suggestions on for loops. + # Default: false + for-loops: true diff --git a/.mergify.yml b/.mergify.yml index 8f93dc9301c..e0ce720375e 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -250,3 +250,16 @@ pull_request_rules: labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" + - name: backport patches to 8.9 branch + conditions: + - merged + - label=backport-v8.9.0 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "8.9" + labels: + - "backport" + title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc index fc28efdbc5f..d7323a2081b 100644 --- a/CHANGELOG-developer.next.asciidoc +++ b/CHANGELOG-developer.next.asciidoc @@ -83,6 +83,7 @@ The list below covers the major changes between 7.0.0-rc2 and main only. - Fix the multiple host support for mongodb module {pull}34624[34624] - Skip HTTPJSON flakey test. {issue}34929[34929] {pull}35138[35138] - Fix ingest pipeline for panw module to parse url scheme correctly {pull}35757[35757] +- Renamed an httpjson input metric to follow naming conventions. `httpjson_interval_pages_total` was renamed to `httpjson_interval_pages` because the `_total` suffix is reserved for counters. {issue}35933[35933] {pull}36169[36169] ==== Added @@ -162,6 +163,8 @@ The list below covers the major changes between 7.0.0-rc2 and main only. - Add benchmarking to HTTPJSON input testing. {pull}35138[35138] - Allow non-AWS endpoints for testing Filebeat awss3 input. {issue}35496[35496] {pull}35520[35520] - Add AUTH (username) and SSL/TLS support for Redis module {pull}35240[35240] +- Pin PyYAML version to 5.3.1 to avoid CI errors temporarily {pull}36091[36091] +- Skip dependabot updates for github.com/elastic/mito. {pull}36158[36158] ==== Deprecated diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index f49c6480c14..0f651fc31a7 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -3,6 +3,145 @@ :issue: https://github.com/elastic/beats/issues/ :pull: https://github.com/elastic/beats/pull/ +[[release-notes-8.9.0]] +=== Beats version 8.9.0 +https://github.com/elastic/beats/compare/v8.8.2\...v8.9.0[View commits] + +==== Bugfixes + +*Affecting all Beats* + +- Do not print context cancelled error message when running under agent {pull}36006[36006] +- Fix recovering from invalid output configuration when running under Elastic-Agent {pull}36016[36016] + +*Filebeat* + +- Add support in s3 input for JSON with array of objects. {pull}35475[35475] +- RFC5424 syslog timestamps with offset 'Z' will be treated as UTC rather than using the default timezone. {pull}35360[35360] +- Fixed a minor code error in the GCS input scheduler where a config value was being used directly instead of the source struct. {pull}35729[35729] +- Fix CEL input JSON marshalling of nested objects. {issue}35763[35763] {pull}35774[35774] +- Fix metric collection in GCPPubSub input. 
{pull}35773[35773] +- Fix end point deregistration in http_endpoint input. {issue}35899[35899] {pull}35903[35903] +- Fix duplicate ID panic in filestream metrics. {issue}35964[35964] {pull}35972[35972] +- Improve error reporting and fix IPv6 handling of TCP and UDP metric collection. {pull}35996[35996] +- Fix handling of NUL-terminated log lines in Fortinet Firewall module. {issue}36026[36026] {pull}36027[36027] +- Make redact field configuration recommended in CEL input and log warning if missing. {pull}36008[36008] +- Fix handling of region name configuration in awss3 input {pull}36034[36034] + +*Heartbeat* + +- Update gval version. {pull}35636[35636] +- Filter dev flags for ui monitors inside synthetics_args. {pull}35788[35788] +- Fix temp dir running out of space with project monitors. {issue}35843[35843] + +*Metricbeat* + +- Fix no error logs displayed in CloudWatch EC2, RDS and SQS metadata {issue}34985[34985] {pull}35035[35035] +- Remove Beta warning from IIS application_pool metricset {pull}35480[35480] +- Improve documentation for ActiveMQ module {issue}35113[35113] {pull}35558[35558] +- Resolve statsd module's prematurely halting of metrics parsing upon encountering an invalid packet. {pull}35075[35075] + +*Packetbeat* + +- Fix double channel close panic when reloading. {pull}35324[35324] + +*Winlogbeat* + +- Prevent panic on closing iterators on empty channels in experimental API. {issue}33966[33966] {pull}35423[35423] + +==== Added + +*Affecting all Beats* + +- Add Hetzner Cloud as a provider for `add_cloud_metadata`. {pull}35456[35456] +- Upgrade version of elastic-agent-autodiscover to v0.6.1 for improved memory consumption on k8s. {pull}35483[35483] +- Added `orchestrator.cluster.id` and `orchestrator.cluster.name` fields to the add_cloud_metadata processor, AWS cloud provider. {pull}35182[35182] +- Lowercase reported hostnames per Elastic Common Schema (ECS) guidelines for the host.name field. Upgraded github.com/elastic/go-sysinfo to 1.11.0. {pull}35652[35652] + +*Filebeat* + +- Added support for decoding apache parquet files in awss3 input. {issue}34662[34662] {pull}35578[35578] +- Add support for CRC validation in Filebeat's HTTP endpoint input. {pull}35204[35204] +- Add support for CRC validation in Zoom module. {pull}35604[35604] +- Add execution budget to CEL input. {pull}35409[35409] +- Add XML decoding support to HTTPJSON. {issue}34438[34438] {pull}35235[35235] +- Add delegated account support when using Google ADC in `httpjson` input. {pull}35507[35507] +- Add metrics for filestream input. {pull}35529[35529] +- Add support for collecting `httpjson` metrics. {pull}35392[35392] +- Add XML decoding support to CEL. {issue}34438[34438] {pull}35372[35372] +- Mark CEL input as GA. {pull}35559[35559] +- Add metrics for gcp-pubsub input. {pull}35614[35614] +- Allow non-AWS endpoints for awss3 input. {issue}35496[35496] {pull}35520[35520] +- Add Okta input package for entity analytics. {pull}35611[35611] +- Expose harvester metrics from filestream input {pull}35835[35835] {issue}33771[33771] +- Add device support for Azure AD entity analytics. {pull}35807[35807] + +*Libbeat* +- Added support for Apache Parquet file reader. {issue}34662[34662] {pull}35183[35183] + +*Metricbeat* + +- Add GCP Redis metadata {pull}33701[33701] +- Migrate Azure Billing, Monitor, and Storage metricsets to the newer SDK. {pull}33585[33585] +- Add support for float64 values parsing for statsd metrics of counter type. 
{pull}35099[35099] + +*Packetbeat* + +- Added `packetbeat.interfaces.fanout_group` to allow a Packetbeat sniffer to join an AF_PACKET fanout group. {issue}35451[35451] {pull}35453[35453] + +*Winlogbeat* + +- Set `host.os.type` and `host.os.family` to "windows" if not already set. {pull}35435[35435] +- Handle empty DNS answer data in QueryResults for the Sysmon Pipeline {pull}35207[35207] + +[[release-notes-8.8.2]] +=== Beats version 8.8.2 +https://github.com/elastic/beats/compare/v8.8.1\...v8.8.2[View commits] + +==== Bugfixes + +*Affecting all Beats* + +- Make sure k8s watchers are closed when closing k8s meta processor. {pull}35630[35630] +- Upgraded Apache Arrow library used in `x-pack/libbeat/reader/parquet` from v11 to v12.0.1 in order to fix cross-compilation issues. {pull}35640[35640] +- Fix panic when the disk queue's MaxRetryInterval configuration is specified, but RetryInterval is not. {pull}35820[35820] + +*Filebeat* + +- Fix syslog message parsing for fortinet.firewall to take into account quoted values. {pull}35522[35522] +- [Filebeat GCS input] Fixed an issue where `bucket_timeout` was being applied to the entire bucket poll interval and not individual bucket object read operations. Fixed a map write concurrency issue arising from data races when using a high number of workers. Fixed the flaky tests that were present in the GCS test suit. {pull}35605[35605] +- Fix handling of IPv6 unspecified addresses in TCP input. {issue}35064[35064] {pull}35637[35637] +- Improve error reporting and fix IPv6 handling of TCP and UDP metric collection. {pull}35772[35772] +- Fix input reload on autodiscover. {issue}34388[34388] {pull}35645[35645] + +*Heartbeat* + +- Fix serialization of processors when running diagnostics. {pull}35698[35698] + +*Metricbeat* + +- Fix calculation of the `host.cpu.usage` metric for EC2. {pull}35717[35717] + +==== Added + +*Affecting all Beats* + +- Update Go version to 1.19.10. {pull}35751[35751] + +*Filebeat* + +- [GCS] Added scheduler debug logs and improved the context passing mechanism by removing them from struct params and passing them as function arguments. {pull}35674[35674] + +*Packetbeat* + +- Add AF_PACKET metrics. {issue}35428[35428] {pull}35489[35489] + +==== Deprecated + +*Heartbeat* + +- Removed support for `zip_url` and `local` browser sources. {pull}35429[35429] + [[release-notes-8.8.1]] === Beats version 8.8.1 https://github.com/elastic/beats/compare/v8.8.0\...v8.8.1[View commits] diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 0a4f3490fd5..6ca484f2789 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -9,14 +9,13 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] ==== Breaking changes *Affecting all Beats* -- Update Go version to 1.19.10 {pull}35751[35751] - Fix status reporting to Elastic-Agent when output configuration is invalid running under Elastic-Agent {pull}35719[35719] +- Upgrade Go to 1.20.7 {pull}36233[36233] *Auditbeat* *Filebeat* -- Fix input reload on autodiscover {issue}34388[34388] {pull}35645[35645] *Heartbeat* @@ -45,23 +44,9 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Affecting all Beats* - Support for multiline zookeeper logs {issue}2496[2496] -- Allow `clock_nanosleep` in the default seccomp profiles for amd64 and 386. Newer versions of glibc (e.g. 2.31) require it. {issue}33792[33792] -- Disable lockfile when running under elastic-agent. 
{pull}33988[33988] -- Fix lockfile logic, retry locking {pull}34194[34194] - Add checks to ensure reloading of units if the configuration actually changed. {pull}34346[34346] - Fix namespacing on self-monitoring {pull}32336[32336] -- Fix race condition when stopping runners {pull}32433[32433] -- Fix concurrent map writes when system/process code called from reporter code {pull}32491[32491] -- Log errors from the Elastic Agent V2 client errors channel. Avoids blocking when error occurs communicating with the Elastic Agent. {pull}34392[34392] -- Only log publish event messages in trace log level under elastic-agent. {pull}34391[34391] -- Fix issue where updating a single Elastic Agent configuration unit results in other units being turned off. {pull}34504[34504] -- Fix dropped events when monitor a beat under the agent and send its `Host info` log entry. {pull}34599[34599] - Fix namespacing on self-monitoring {pull}32336[32336] -- Fix race condition when stopping runners {pull}32433[32433] -- Fix concurrent map writes when system/process code called from reporter code {pull}32491[32491] -- Fix panics when a processor is closed twice {pull}34647[34647] -- Update elastic-agent-system-metrics to v0.4.6 to allow builds on mips platforms. {pull}34674[34674] -- The Elasticsearch output now splits large requests instead of dropping them when it receives a StatusRequestEntityTooLarge error. {pull}34911[34911] - Fix Beats started by agent do not respect the allow_older_versions: true configuration flag {issue}34227[34227] {pull}34964[34964] - Fix performance issues when we have a lot of inputs starting and stopping by allowing to disable global processors under fleet. {issue}35000[35000] {pull}35031[35031] - In cases where the matcher detects a non-string type in a match statement, report the error as a debug statement, and not a warning statement. {pull}35119[35119] @@ -70,45 +55,19 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Make sure k8s watchers are closed when closing k8s meta processor. {pull}35630[35630] - Upgraded apache arrow library used in x-pack/libbeat/reader/parquet from v11 to v12.0.1 in order to fix cross-compilation issues {pull}35640[35640] - Fix panic when MaxRetryInterval is specified, but RetryInterval is not {pull}35820[35820] +- Do not print context cancelled error message when running under agent {pull}36006[36006] +- Fix recovering from invalid output configuration when running under Elastic-Agent {pull}36016[36016] +- Improve StreamBuf append to improve performance when reading long lines from files. {pull}35928[35928] +- Eliminate cloning of event in deepUpdate {pull}35945[35945] +- Fix ndjson parser to store JSON fields correctly under `target` {issue}29395[29395] +- Support build of projects outside of beats directory {pull}36126[36126] + + *Auditbeat* *Filebeat* -- [Auditbeat System Package] Added support for Apple Silicon chips. {pull}34433[34433] -- [Azure blob storage] Changed logger field name from `container` to `container_name` so that it does not clash - with the ecs field name `container`. {pull}34403[34403] -- [GCS] Added support for more mime types & introduced offset tracking via cursor state. Also added support for - automatic splitting at root level, if root level element is an array. {pull}34155[34155] -- [httpsjon] Improved error handling during pagination with chaining & split processor {pull}34127[34127] -- [Azure blob storage] Added support for more mime types & introduced offset tracking via cursor state. 
{pull}33981[33981] -- Fix EOF on single line not producing any event. {issue}30436[30436] {pull}33568[33568] -- Fix handling of error in states in direct aws-s3 listing input {issue}33513[33513] {pull}33722[33722] -- Fix `httpjson` input page number initialization and documentation. {pull}33400[33400] -- Add handling of AAA operations for Cisco ASA module. {issue}32257[32257] {pull}32789[32789] -- Fix gc.log always shipped even if gc fileset is disabled {issue}30995[30995] -- Fix handling of empty array in httpjson input. {pull}32001[32001] -- Fix reporting of `filebeat.events.active` in log events such that the current value is always reported instead of the difference from the last value. {pull}33597[33597] -- Fix splitting array of strings/arrays in httpjson input {issue}30345[30345] {pull}33609[33609] -- Fix Google workspace pagination and document ID generation. {pull}33666[33666] -- Fix PANW handling of messages with event.original already set. {issue}33829[33829] {pull}33830[33830] -- Rename identity as identity_name when the value is a string in Azure Platform Logs. {pull}33654[33654] -- Fix 'requires pointer' error while getting cursor metadata. {pull}33956[33956] -- Fix input cancellation handling when HTTP client does not support contexts. {issue}33962[33962] {pull}33968[33968] -- Update mito CEL extension library to v0.0.0-20221207004749-2f0f2875e464 {pull}33974[33974] -- Fix CEL result deserialisation when evaluation fails. {issue}33992[33992] {pull}33996[33996] -- Fix handling of non-200/non-429 status codes. {issue}33999[33999] {pull}34002[34002] -- [azure-eventhub input] Switch the run EPH run mode to non-blocking {pull}34075[34075] -- [google_workspace] Fix pagination and cursor value update. {pull}34274[34274] -- Fix handling of quoted values in auditd module. {issue}22587[22587] {pull}34069[34069] -- Fixing system tests not returning expected content encoding for azure blob storage input. {pull}34412[34412] -- [Azure Logs] Fix authentication_processing_details parsing in sign-in logs. {issue}34330[34330] {pull}34478[34478] -- Prevent Elasticsearch from spewing log warnings about redundant wildcard when setting up ingest pipelines. {issue}34249[34249] {pull}34550[34550] -- Gracefully handle Windows event channel not found errors in winlog input. {issue}30201[30201] {pull}34605[34605] -- Fix the issue of `cometd` input worker getting closed in case of a network connection issue and an EOF error. {issue}34326[34326] {pull}34327[34327] -- Fix for httpjson first_response object throwing false positive errors by making it a flag based object {issue}34747[34747] {pull}34748[34748] -- Fix errors and panics due to re-used processors {pull}34761[34761] -- Add missing Basic Authentication support to CEL input {issue}34609[34609] {pull}34689[34689] - [Gcs Input] - Added missing locks for safe concurrency {pull}34914[34914] - Fix the ignore_inactive option being ignored in Filebeat's filestream input {pull}34770[34770] - Fix TestMultiEventForEOFRetryHandlerInput unit test of CometD input {pull}34903[34903] @@ -132,12 +91,22 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - [system] sync system/auth dataset with system integration 1.29.0. {pull}35581[35581] - [GCS Input] - Fixed an issue where bucket_timeout was being applied to the entire bucket poll interval and not individual bucket object read operations. Fixed a map write concurrency issue arising from data races when using a high number of workers. 
Fixed the flaky tests that were present in the GCS test suit. {pull}35605[35605] - Fix filestream false positive log error "filestream input with ID 'xyz' already exists" {issue}31767[31767] +- Fix error message formatting from filestream input. {pull}35658[35658] - Fix error when trying to use `include_message` parser {issue}35440[35440] - Fix handling of IPv6 unspecified addresses in TCP input. {issue}35064[35064] {pull}35637[35637] - Fixed a minor code error in the GCS input scheduler where a config value was being used directly instead of the source struct. {pull}35729[35729] - Improve error reporting and fix IPv6 handling of TCP and UDP metric collection. {pull}35772[35772] - Fix CEL input JSON marshalling of nested objects. {issue}35763[35763] {pull}35774[35774] - Fix metric collection in GCPPubSub input. {pull}35773[35773] +- Fix end point deregistration in http_endpoint input. {issue}35899[35899] {pull}35903[35903] +- Fix duplicate ID panic in filestream metrics. {issue}35964[35964] {pull}35972[35972] +- Improve error reporting and fix IPv6 handling of TCP and UDP metric collection. {pull}35996[35996] +- Fix handling of NUL-terminated log lines in Fortinet Firewall module. {issue}36026[36026] {pull}36027[36027] +- Make redact field configuration recommended in CEL input and log warning if missing. {pull}36008[36008] +- Fix handling of region name configuration in awss3 input {pull}36034[36034] +- Fix panic when sqs input metrics getter is invoked {pull}36101[36101] {issue}36077[36077] +- Make CEL input's `now` global variable static for evaluation lifetime. {pull}36107[36107] +- Update mito CEL extension library to v1.5.0. {pull}36146[36146] *Heartbeat* @@ -161,6 +130,13 @@ automatic splitting at root level, if root level element is an array. {pull}3415 - Fix formatting issue with socket trace timeout. {pull}35434[35434] - Update gval version. {pull}35636[35636] - Fix serialization of processors when running diagnostics. {pull}35698[35698] +- Filter dev flags for ui monitors inside synthetics_args. {pull}35788[35788] +- Fix temp dir running out of space with project monitors. {issue}35843[35843] +- Fixing the grok expression outputs of log files {pull}35221[35221] +- Enable heartbeat-wide publish timeout setting with run_once. {pull}35721[35721] + +*Heartbeat* + *Heartbeat* @@ -182,7 +158,6 @@ automatic splitting at root level, if root level element is an array. {pull}3415 *Filebeat* -- Sanitize filenames for request tracer in cel input. {pull}35154[35154] *Heartbeat* @@ -195,12 +170,6 @@ automatic splitting at root level, if root level element is an array. {pull}3415 - Add GCP CloudSQL region filter {pull}32943[32943] - Fix logstash cgroup mappings {pull}33131[33131] - Remove unused `elasticsearch.node_stats.indices.bulk.avg_time.bytes` mapping {pull}33263[33263] -- Fix kafka dashboard field names {pull}33555[33555] -- Add tags to events based on parsed identifier. {pull}33472[33472] -- Support Oracle-specific connection strings in SQL module {issue}32089[32089] {pull}32293[32293] -- Remove deprecated metrics from controller manager, scheduler and proxy {pull}34161[34161] -- Fix metrics split through different events and metadata not matching for aws cloudwatch. {pull}34483[34483] -- Fix metadata enricher with correct container ids for pods with multiple containers in container metricset. Align `kubernetes.container.id` and `container.id` fields for state_container metricset. 
{pull}34516[34516] - Make generic SQL GA {pull}34637[34637] - Collect missing remote_cluster in elasticsearch ccr metricset {pull}34957[34957] - Add context with timeout in AWS API calls {pull}35425[35425] @@ -209,31 +178,26 @@ automatic splitting at root level, if root level element is an array. {pull}3415 - Improve documentation for ActiveMQ module {issue}35113[35113] {pull}35558[35558] - Fix EC2 host.cpu.usage {pull}35717[35717] - Resolve statsd module's prematurely halting of metrics parsing upon encountering an invalid packet. {pull}35075[35075] +- Fix the gap in fetching forecast API metrics at the end of each month for Azure billing module {pull}36142[36142] *Osquerybeat* -- Adds the `elastic_file_analysis` table to the Osquery extension for macOS builds. {pull}35056[35056] *Packetbeat* -- Fix double channel close panic when reloading. {pull}35324[35324] -- Fix BPF filter setting not being applied to sniffers. {issue}35363[35363] {pull}35484[35484] -- Fix handling of Npcap installation options from Fleet. {pull}35541[35541] +- Fix handling of Npcap installation options from Fleet. {pull}35541[35541] {pull}35935[35935] *Winlogbeat* -- Fix handling of event data with keys containing dots. {issue}34345[34345] {pull}34549[34549] -- Gracefully handle channel not found errors. {issue}30201[30201] {pull}34605[34605] -- Clarify query term limits warning and remove link to missing Microsoft doc page. {pull}34715[34715] -- Improve documentation for event_logs.name configuration. {pull}34931[34931] -- Move repeated channel not found errors to debug level. {issue}35314[35314] {pull}35317[35317] -- Fix panic due to misrepresented buffer use. {pull}35437[35437] -- Prevent panic on closing iterators on empty channels in experimental API. {issue}33966[33966] {pull}35423[35423] -- Allow program termination when attempting to open an absent channel. {pull}35474[35474] +- Fix powershell details regexp to prevent excessive backtracking when processing command invocations. {pull}36178[36178] + +*Functionbeat* + *Functionbeat* + *Elastic Logging Plugin* @@ -242,12 +206,6 @@ automatic splitting at root level, if root level element is an array. {pull}3415 *Affecting all Beats* - Added append Processor which will append concrete values or values from a field to target. {issue}29934[29934] {pull}33364[33364] -- Allow users to enable features via configuration, starting with the FQDN reporting feature. {issue}1070[1070] {pull}34456[34456] -- Add Hetzner Cloud as a provider for `add_cloud_metadata`. {pull}35456[35456] -- Reload Beat when TLS certificates or key files are modified. {issue}34408[34408] {pull}34416[34416] -- Upgrade version of elastic-agent-autodiscover to v0.6.1 for improved memory consumption on k8s. {pull}35483[35483] -- Added `orchestrator.cluster.id` and `orchestrator.cluster.name` fields to the add_cloud_metadata processor, AWS cloud provider. {pull}35182[35182] -- Lowercase reported hostnames per Elastic Common Schema (ECS) guidelines for the host.name field. Upgraded github.com/elastic/go-sysinfo to 1.11.0. {pull}35652[35652] *Auditbeat* @@ -259,53 +217,13 @@ automatic splitting at root level, if root level element is an array. {pull}3415 - Add cloudflare R2 to provider list in AWS S3 input. {pull}32620[32620] - Add support for single string containing multiple relation-types in getRFC5988Link. {pull}32811[32811] - Added separation of transform context object inside httpjson. 
Introduced new clause `.parent_last_response.*` {pull}33499[33499] -- Cloud Foundry input uses server-side filtering when retrieving logs. {pull}33456[33456] -- Add `parse_aws_vpc_flow_log` processor. {pull}33656[33656] -- Update `aws.vpcflow` dataset in AWS module have a configurable log `format` and to produce ECS 8.x fields. {pull}33699[33699] -- Modified `aws-s3` input to reduce mutex contention when multiple SQS message are being processed concurrently. {pull}33658[33658] -- Disable "event normalization" processing for the aws-s3 input to reduce allocations. {pull}33673[33673] -- Add Common Expression Language input. {pull}31233[31233] -- Add support for http+unix and http+npipe schemes in httpjson input. {issue}33571[33571] {pull}33610[33610] -- Add support for http+unix and http+npipe schemes in cel input. {issue}33571[33571] {pull}33712[33712] -- Add `decode_duration`, `move_fields` processors. {pull}31301[31301] -- Add backup to bucket and delete functionality for the `aws-s3` input. {issue}30696[30696] {pull}33559[33559] -- Add metrics for UDP packet processing. {pull}33870[33870] -- Convert UDP input to v2 input. {pull}33930[33930] -- Improve collection of risk information from Okta debug data. {issue}33677[33677] {pull}34030[34030] - Adding filename details from zip to response for httpjson {issue}33952[33952] {pull}34044[34044] -- Allow user configuration of keep-alive behaviour for HTTPJSON and CEL inputs. {issue}33951[33951] {pull}34014[34014] -- Add support for polling system UDP stats for UDP input metrics. {pull}34070[34070] -- Add support for recognizing the log level in Elasticsearch JVM logs {pull}34159[34159] -- Add new Entity Analytics input with Azure Active Directory support. {pull}34305[34305] -- Added metric `sqs_lag_time` for aws-s3 input. {pull}34306[34306] -- Add metrics for TCP packet processing. {pull}34333[34333] -- Add metrics for unix socket packet processing. {pull}34335[34335] -- Add beta `take over` mode for `filestream` for simple migration from `log` inputs {pull}34292[34292] -- Add pagination support for Salesforce module. {issue}34057[34057] {pull}34065[34065] -- Allow users to redact sensitive data from CEL input debug logs. {pull}34302[34302] -- Added support for HTTP destination override to Google Cloud Storage input. {pull}34413[34413] - Added metric `sqs_messages_waiting_gauge` for aws-s3 input. {pull}34488[34488] -- Add support for new Rabbitmq timestamp format for logs {pull}34211[34211] -- Allow user configuration of timezone offset in Cisco ASA and FTD modules. {pull}34436[34436] -- Allow user configuration of timezone offset in Checkpoint module. {pull}34472[34472] -- Add support for Okta debug attributes, `risk_reasons`, `risk_behaviors` and `factor`. {issue}33677[33677] {pull}34508[34508] -- Fill okta.request.ip_chain.* as a flattened object in Okta module. {pull}34621[34621] -- Fixed GCS log format issues. {pull}34659[34659] - Add nginx.ingress_controller.upstream.ip to related.ip {issue}34645[34645] {pull}34672[34672] -- Include NAT and firewall IPs in `related.ip` in Fortinet Firewall module. {issue}34640[34640] {pull}34673[34673] -- Add Basic Authentication support on constructed requests to CEL input {issue}34609[34609] {pull}34689[34689] -- Add string manipulation extensions to CEL input {issue}34610[34610] {pull}34689[34689] - Add unix socket log parsing for nginx ingress_controller {pull}34732[34732] - Added metric `sqs_worker_utilization` for aws-s3 input. 
{pull}34793[34793] -- Improve CEL input documentation {pull}34831[34831] -- Add metrics documentation for CEL and AWS CloudWatch inputs. {issue}34887[34887] {pull}34889[34889] -- Register MIME handlers for CSV types in CEL input. {pull}34934[34934] - Add MySQL authentication message parsing and `related.ip` and `related.user` fields {pull}34810[34810] -- Mention `mito` CEL tool in CEL input docs. {pull}34959[34959] - Add nginx ingress_controller parsing if one of upstreams fails to return response {pull}34787[34787] -- Allow neflow v9 and ipfix templates to be shared between source addresses. {pull}35036[35036] -- Add support for collecting IPv6 metrics. {pull}35123[35123] -- Added support for decoding apache parquet files in awss3 input. {issue}34662[34662] {pull}35578[35578] - Add oracle authentication messages parsing {pull}35127[35127] - Add sanitization capabilities to azure-eventhub input {pull}34874[34874] - Add support for CRC validation in Filebeat's HTTP endpoint input. {pull}35204[35204] @@ -313,6 +231,7 @@ automatic splitting at root level, if root level element is an array. {pull}3415 - Add execution budget to CEL input. {pull}35409[35409] - Add XML decoding support to HTTPJSON. {issue}34438[34438] {pull}35235[35235] - Add delegated account support when using Google ADC in `httpjson` input. {pull}35507[35507] +- Allow specifying since when to read journald entries. {pull}35408[35408] - Add metrics for filestream input. {pull}35529[35529] - Add support for collecting `httpjson` metrics. {pull}35392[35392] - Add XML decoding support to CEL. {issue}34438[34438] {pull}35372[35372] @@ -320,39 +239,41 @@ automatic splitting at root level, if root level element is an array. {pull}3415 - Add metrics for gcp-pubsub input. {pull}35614[35614] - [GCS] Added scheduler debug logs and improved the context passing mechanism by removing them from struct params and passing them as function arguments. {pull}35674[35674] - Allow non-AWS endpoints for awss3 input. {issue}35496[35496] {pull}35520[35520] +- Under elastic-agent the input metrics will now be included in agent diagnostics dumps. {pull}35798[35798] - Add Okta input package for entity analytics. {pull}35611[35611] - Expose harvester metrics from filestream input {pull}35835[35835] {issue}33771[33771] +- Add device support for Azure AD entity analytics. {pull}35807[35807] +- Improve CEL input performance. {pull}35915[35915] +- Adding filename details from zip to response for httpjson {issue}33952[33952] {pull}34044[34044] +- Added support for min/max template functions in httpjson input. {issue}36094[36094] {pull}36036[36036] +- Add `clean_session` configuration setting for MQTT input. {pull}35806[16204] +- Add fingerprint mode for the filestream scanner and new file identity based on it {issue}34419[34419] {pull}35734[35734] +- Add file system metadata to events ingested via filestream {issue}35801[35801] {pull}36065[36065] +- Add support for localstack based input integration testing {pull}35727[35727] +- Allow parsing bytes in and bytes out as long integer in CEF processor. {issue}36100[36100] {pull}36108[36108] +- Add support for registered owners and users to AzureAD entity analytics provider. {pull}36092[36092] *Auditbeat* - - Migration of system/package module storage from gob encoding to flatbuffer encoding in bolt db. {pull}34817[34817] *Libbeat* -- Added support for apache parquet file reader. {issue}34662[34662] {pull}35183[35183] *Heartbeat* -- Users can now configure max scheduler job limits per monitor type via env var. 
{pull}34307[34307] - Added status to monitor run log report. -- Removed beta label for browser monitors. {pull}35424[35424]. *Metricbeat* - Add per-thread metrics to system_summary {pull}33614[33614] - Add GCP CloudSQL metadata {pull}33066[33066] -- Add GCP Redis metadata {pull}33701[33701] -- Remove GCP Compute metadata cache {pull}33655[33655] - Add support for multiple regions in GCP {pull}32964[32964] -- Add GCP Redis regions support {pull}33728[33728] -- Add namespace metadata to all namespaced kubernetes resources. {pull}33763[33763] -- Changed cloudwatch module to call ListMetrics API only once per region, instead of per AWS namespace {pull}34055[34055] -- Add beta ingest_pipeline metricset to Elasticsearch module for ingest pipeline monitoring {pull}34012[34012] -- Handle duplicated TYPE line for prometheus metrics {issue}18813[18813] {pull}33865[33865] - Add GCP Carbon Footprint metricbeat data {pull}34820[34820] - Add event loop utilization metric to Kibana module {pull}35020[35020] - Support collecting metrics from both the monitoring account and linked accounts from AWS CloudWatch. {pull}35540[35540] - Add new parameter `include_linked_accounts` to enable/disable metrics collection from multiple linked AWS Accounts {pull}35648[35648] - Migrate Azure Billing, Monitor, and Storage metricsets to the newer SDK. {pull}33585[33585] - Add support for float64 values parsing for statsd metrics of counter type. {pull}35099[35099] +- Add kubernetes.deployment.status.* fields for Kubernetes module {pull}35999[35999] + *Osquerybeat* @@ -361,6 +282,11 @@ automatic splitting at root level, if root level element is an array. {pull}3415 - Added `packetbeat.interfaces.fanout_group` to allow a Packetbeat sniffer to join an AF_PACKET fanout group. {issue}35451[35451] {pull}35453[35453] - Add AF_PACKET metrics. {issue}35428[35428] {pull}35489[35489] +- Under elastic-agent the input metrics will now be included in agent diagnostics dumps. {pull}35798[35798] +- Add support for multiple regions in GCP {pull}32964[32964] + +*Packetbeat* + *Winlogbeat* @@ -372,6 +298,7 @@ automatic splitting at root level, if root level element is an array. {pull}3415 - Set `host.os.type` and `host.os.family` to "windows" if not already set. {pull}35435[35435] - Handle empty DNS answer data in QueryResults for the Sysmon Pipeline {pull}35207[35207] +- Under elastic-agent the input metrics will now be included in agent diagnostics dumps. {pull}35798[35798] *Elastic Log Driver* @@ -388,7 +315,6 @@ automatic splitting at root level, if root level element is an array. {pull}3415 *Heartbeat* -- Removed zip_url and local browser sources. {pull}35429[35429] *Metricbeat* @@ -409,3 +335,6 @@ automatic splitting at root level, if root level element is an array. 
{pull}3415 ==== Known Issues + + + diff --git a/Jenkinsfile b/Jenkinsfile index d9192e1e1ad..a0231b94957 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -309,7 +309,7 @@ def cloud(Map args = [:]) { withCloudTestEnv(args) { startCloudTestEnv(name: args.directory, dirs: args.dirs, withAWS: args.withAWS) try { - targetWithoutNode(context: args.context, command: args.command, directory: args.directory, label: args.label, withModule: args.withModule, isMage: true, id: args.id) + targetWithoutNode(dirs: args.dirs, context: args.context, command: args.command, directory: args.directory, label: args.label, withModule: args.withModule, isMage: true, id: args.id) } finally { terraformCleanup(name: args.directory, dir: args.directory, withAWS: args.withAWS) } @@ -578,6 +578,7 @@ def target(Map args = [:]) { * - mage then the dir(location) is required, aka by enabling isMage: true. */ def targetWithoutNode(Map args = [:]) { + def dirs = args.get('dirs',[]) def command = args.command def context = args.context def directory = args.get('directory', '') @@ -590,9 +591,22 @@ def targetWithoutNode(Map args = [:]) { def enableRetry = args.get('enableRetry', false) def withGCP = args.get('withGCP', false) def withNodejs = args.get('withNodejs', false) + String name = normalise(args.directory) withGithubNotify(context: "${context}") { withBeatsEnv(archive: true, withModule: withModule, directory: directory, id: args.id) { dumpVariables() + // unstash terraform outputs in the same directory where the files were stashed + dirs?.each { folder -> + dir("${folder}") { + try { + unstash("terraform-${name}") + //unstash does not print verbose output , hence printing contents of the directory for logging purposes + sh "ls -la ${pwd()}" + } catch (error) { + echo "error unstashing: ${error}" + } + } + } withTools(k8s: installK8s, gcp: withGCP, nodejs: withNodejs) { // make commands use -C while mage commands require the dir(folder) // let's support this scenario with the location variable. @@ -604,7 +618,7 @@ def targetWithoutNode(Map args = [:]) { cmd(label: "${args.id?.trim() ? args.id : env.STAGE_NAME} - ${command}", script: "${command}") } } else { - cmd(label: "${args.id?.trim() ? args.id : env.STAGE_NAME} - ${command}", script: "${command}") + cmd(label: "${args.id?.trim() ? args.id : env.STAGE_NAME} - ${command}", script: "${command}") } } } @@ -920,6 +934,8 @@ def startCloudTestEnv(Map args = [:]) { stage("${name}-prepare-cloud-env"){ withBeatsEnv(archive: false, withModule: false) { try { + // Run the docker services to setup the emulated cloud environment + sh(label: 'Run docker-compose services for emulated cloud env', script: ".ci/scripts/install-docker-services.sh ", returnStatus: true) dirs?.each { folder -> retryWithSleep(retries: 2, seconds: 5, backoff: true){ terraformApply(folder) @@ -930,12 +946,19 @@ def startCloudTestEnv(Map args = [:]) { // If it failed then cleanup without failing the build sh(label: 'Terraform Cleanup', script: ".ci/scripts/terraform-cleanup.sh ${folder}", returnStatus: true) } + // Cleanup the docker services + sh(label: 'Docker Compose Cleanup', script: ".ci/scripts/docker-services-cleanup.sh", returnStatus: true) + error('startCloudTestEnv: terraform apply failed.') } finally { - // Archive terraform states in case manual cleanup is needed. - archiveArtifacts(allowEmptyArchive: true, artifacts: '**/terraform.tfstate') + dirs?.each { folder -> + // Archive terraform states in case manual cleanup is needed. 
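+          // (The archived copy stays browsable on the build page; the per-folder
+          // stash created below is what targetWithoutNode() later unstashes into
+          // the same directory, so terraform outputs such as outputs*.yml reach
+          // the cloud test stages.)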
+ archiveArtifacts(allowEmptyArchive: true, artifacts: '**/terraform.tfstate') + dir("${folder}") { + stash(name: "terraform-${name}", allowEmpty: true, includes: '**/terraform.tfstate,**/.terraform/**,outputs*.yml') + } + } } - stash(name: "terraform-${name}", allowEmpty: true, includes: '**/terraform.tfstate,**/.terraform/**') } } } @@ -960,6 +983,7 @@ def terraformApply(String directory) { * Tear down the terraform environments, by looking for all terraform states in directory * then it runs terraform destroy for each one. * It uses terraform states previously stashed by startCloudTestEnv. +* This also tears down any associated docker services */ def terraformCleanup(Map args = [:]) { String name = normalise(args.name) @@ -970,6 +994,8 @@ def terraformCleanup(Map args = [:]) { retryWithSleep(retries: 2, seconds: 5, backoff: true) { sh(label: "Terraform Cleanup", script: ".ci/scripts/terraform-cleanup.sh ${directory}") } + // Cleanup associated docker services + sh(label: 'Docker Compose Cleanup', script: ".ci/scripts/docker-services-cleanup.sh") } } } diff --git a/NOTICE.txt b/NOTICE.txt index 969942b5464..109dc19ac6b 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -11,11 +11,11 @@ Third party libraries used by the Elastic Beats project: -------------------------------------------------------------------------------- Dependency : cloud.google.com/go -Version: v0.105.0 +Version: v0.107.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go@v0.105.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go@v0.107.0/LICENSE: Apache License @@ -435,11 +435,11 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/bigquery@v1.44 -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/compute -Version: v1.14.0 +Version: v1.15.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute@v1.14.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute@v1.15.1/LICENSE: Apache License @@ -9555,11 +9555,11 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : github.com/cespare/xxhash/v2 -Version: v2.1.2 +Version: v2.2.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/cespare/xxhash/v2@v2.1.2/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/cespare/xxhash/v2@v2.2.0/LICENSE.txt: Copyright (c) 2016 Caleb Spare @@ -12479,11 +12479,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-a -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-client/v7 -Version: v7.1.2 +Version: v7.2.0 Licence type (autodetected): Elastic -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.1.2/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.2.0/LICENSE.txt: ELASTIC LICENSE AGREEMENT @@ -13448,11 +13448,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-concert@v0.2 -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-elasticsearch/v8 -Version: v8.8.1 +Version: v8.9.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v8@v8.8.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v8@v8.9.0/LICENSE: Apache License Version 2.0, January 2004 @@ -15600,11 +15600,11 @@ limitations under the License. -------------------------------------------------------------------------------- Dependency : github.com/elastic/mito -Version: v1.4.0 +Version: v1.5.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/mito@v1.4.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/mito@v1.5.0/LICENSE: Apache License @@ -20399,11 +20399,11 @@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLI -------------------------------------------------------------------------------- Dependency : github.com/magefile/mage -Version: v1.14.0 +Version: v1.15.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/magefile/mage@v1.14.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/magefile/mage@v1.15.0/LICENSE: Apache License Version 2.0, January 2004 @@ -24801,11 +24801,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/sys -Version: v0.7.0 +Version: v0.9.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.7.0/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.9.0/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. 
@@ -24986,11 +24986,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : google.golang.org/genproto -Version: v0.0.0-20221207170731-23e4bf6bdc37 +Version: v0.0.0-20230110181048-76db0878b65f Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20221207170731-23e4bf6bdc37/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20230110181048-76db0878b65f/LICENSE: Apache License @@ -25198,11 +25198,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0- -------------------------------------------------------------------------------- Dependency : google.golang.org/grpc -Version: v1.51.0 +Version: v1.53.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.51.0/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.53.0/LICENSE: Apache License @@ -25410,11 +25410,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.51.0/LIC -------------------------------------------------------------------------------- Dependency : google.golang.org/protobuf -Version: v1.28.1 +Version: v1.29.1 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/protobuf@v1.28.1/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/protobuf@v1.29.1/LICENSE: Copyright (c) 2018 The Go Authors. All rights reserved. @@ -27319,11 +27319,11 @@ THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : cloud.google.com/go/compute/metadata -Version: v0.2.1 +Version: v0.2.3 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute/metadata@v0.2.1/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/compute/metadata@v0.2.3/LICENSE: Apache License diff --git a/Vagrantfile b/Vagrantfile index 39084118f33..47bc686d74f 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -132,4 +132,48 @@ Vagrant.configure("2") do |config| end end + config.vm.define "beats" do |nodeconfig| + nodeconfig.vm.box = "ubuntu/jammy64" + + # We deliberately set a fully-qualified domain name for the VM; it helps + # test the FQDN feature flag. 
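+    # (With the FQDN feature flag enabled, via `features.fqdn.enabled` in the
+    # Beat configuration, host.name should be reported as the full
+    # beats-dev.elastic.dev.internal rather than the short hostname.)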
+  nodeconfig.vm.hostname = "beats-dev.elastic.dev.internal"
+
+  nodeconfig.vm.network "private_network",
+    hostname: true,
+    ip: "192.168.56.44" # only 192.168.56.0/21 range allowed: https://www.virtualbox.org/manual/ch06.html#network_hostonly
+  nodeconfig.vm.network "forwarded_port",
+    guest: 4244,
+    host: 4244,
+    id: "delve"
+
+  nodeconfig.vm.provider "virtualbox" do |vb|
+    # Display the VirtualBox GUI when booting the machine
+    vb.gui = false
+    vb.customize ["modifyvm", :id, "--vram", "128"]
+    # Customize the amount of memory on the VM:
+    vb.memory = "4096"
+  end
+
+  nodeconfig.vm.provision "shell", inline: <<-SHELL
+    apt-get update
+    apt-get install -y \
+      build-essential \
+      curl \
+      delve \
+      make \
+      unzip \
+      vim \
+      wget
+    curl -sL -o /tmp/go#{GO_VERSION}.linux-amd64.tar.gz https://go.dev/dl/go#{GO_VERSION}.linux-amd64.tar.gz
+    tar -C /usr/local -xzf /tmp/go#{GO_VERSION}.linux-amd64.tar.gz
+    curl -sL -o /tmp/mage_1.15.0_Linux-64bit.tar.gz https://github.com/magefile/mage/releases/download/v1.15.0/mage_1.15.0_Linux-64bit.tar.gz
+    tar -C /tmp -xf /tmp/mage_1.15.0_Linux-64bit.tar.gz
+    mv /tmp/mage /usr/local/bin/mage
+    echo "alias ll='ls -la'" > /etc/profile.d/ll.sh
+    echo 'export PATH=$PATH:/usr/local/go/bin' > /etc/profile.d/go.sh
+    echo 'export PATH=$PATH:$(go env GOPATH)/bin' >> /etc/profile.d/go.sh
+  SHELL
+  end
+end
diff --git a/auditbeat/Dockerfile b/auditbeat/Dockerfile
index d410055f8d5..6df307450e9 100644
--- a/auditbeat/Dockerfile
+++ b/auditbeat/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.19.10
+FROM golang:1.19.12

RUN \
  apt-get update \
@@ -11,7 +11,7 @@ RUN \
  && rm -rf /var/lib/apt/lists/*

# Use a virtualenv to avoid the PEP668 "externally managed environment" error caused by conflicts
-# with the system Python installation. golang:1.19.10 uses Debian 12 which now enforces PEP668.
+# with the system Python installation. golang:1.19.12 uses Debian 12 which now enforces PEP668.
ENV VIRTUAL_ENV=/opt/venv
RUN python3 -m venv $VIRTUAL_ENV
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
diff --git a/auditbeat/_meta/config/auditbeat.config.modules.yml.tmpl b/auditbeat/_meta/config/auditbeat.config.modules.yml.tmpl
index 8108f3edf92..8da31bda1bd 100644
--- a/auditbeat/_meta/config/auditbeat.config.modules.yml.tmpl
+++ b/auditbeat/_meta/config/auditbeat.config.modules.yml.tmpl
@@ -1,6 +1,6 @@
{{header "Config Reloading"}}

-# Config reloading allows to dynamically load modules. Each file which is
+# Config reloading allows you to dynamically load modules. Each file that is
# monitored must contain one or multiple modules as a list.
auditbeat.config.modules:
diff --git a/auditbeat/auditbeat.reference.yml b/auditbeat/auditbeat.reference.yml
index dc803d4ae88..892d4e1b2d0 100644
--- a/auditbeat/auditbeat.reference.yml
+++ b/auditbeat/auditbeat.reference.yml
@@ -9,7 +9,7 @@

# ============================== Config Reloading ==============================

-# Config reloading allows to dynamically load modules. Each file which is
+# Config reloading allows you to dynamically load modules. Each file that is
# monitored must contain one or multiple modules as a list.
auditbeat.config.modules:

@@ -176,10 +176,10 @@ auditbeat.modules:

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
-# If this options is not defined, the hostname is used.
+# If this option is not defined, the hostname is used.
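# (For example, giving several shippers the same value, such as
# name: "web-tier", should group their transactions under one name
# in the web interface.)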
 #name:

 # The tags of the shipper are included in their own field with each
 # transaction published. Tags make it easy to group servers by different
 # logical properties.
 #tags: ["service-X", "web-tier"]
@@ -191,7 +191,7 @@ auditbeat.modules:
 #  env: staging

 # If this option is set to true, the custom fields are stored as top-level
-# fields in the output document instead of being grouped under a fields
+# fields in the output document instead of being grouped under a `fields`
 # sub-dictionary. Default is false.
 #fields_under_root: false
@@ -203,7 +203,7 @@ auditbeat.modules:
 #queue:
   # Queue type by name (default 'mem')
   # The memory queue will present all available events (up to the outputs
-  # bulk_max_size) to the output, the moment the output is ready to server
+  # bulk_max_size) to the output, the moment the output is ready to serve
   # another batch of events.
   #mem:
     # Max number of events the queue can buffer.
@@ -255,7 +255,7 @@ auditbeat.modules:
 # length of its retry interval each time, up to this maximum.
 #max_retry_interval: 30s

-# Sets the maximum number of CPUs that can be executing simultaneously. The
+# Sets the maximum number of CPUs that can execute simultaneously. The
 # default is the number of logical CPUs available in the system.
 #max_procs:
@@ -376,7 +376,7 @@ auditbeat.modules:
 #  ignore_missing: false
 #  fail_on_error: true
 #
-# The following example copies the value of message to message_copied
+# The following example copies the value of the message field to message_copied
 #
 #processors:
 #  - copy_fields:
@@ -386,7 +386,7 @@ auditbeat.modules:
 #      fail_on_error: true
 #      ignore_missing: false
 #
-# The following example truncates the value of message to 1024 bytes
+# The following example truncates the value of the message field to 1024 bytes
 #
 #processors:
 #  - truncate_fields:
@@ -483,7 +483,7 @@ output.elasticsearch:
   # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
   #index: "auditbeat-%{[agent.version]}"

-  # Optional ingest pipeline. By default no pipeline will be used.
+  # Optional ingest pipeline. By default, no pipeline will be used.
   #pipeline: ""

   # Optional HTTP path
@@ -1214,14 +1214,14 @@ output.elasticsearch:

 # These settings control loading the sample dashboards to the Kibana index. Loading
-# the dashboards are disabled by default and can be enabled either by setting the
-# options here, or by using the `-setup` CLI flag or the `setup` command.
+# the dashboards is disabled by default and can be enabled either by setting the
+# options here or by using the `-setup` CLI flag or the `setup` command.
 #setup.dashboards.enabled: false

 # The directory from where to read the dashboards. The default is the `kibana`
 # folder in the home path.
 #setup.dashboards.directory: ${path.home}/kibana

-# The URL from where to download the dashboards archive. It is used instead of
+# The URL from which to download the dashboard archive. It is used instead of
 # the directory if it has a value.
 #setup.dashboards.url:
@@ -1318,7 +1318,7 @@ setup.template.settings:

 # Configure index lifecycle management (ILM) to manage the backing indices
 # of your data streams.

-# Enable ILM support. Valid values are true, false.
+# Enable ILM support. Valid values are true or false.
 #setup.ilm.enabled: true

 # Set the lifecycle policy name. The default policy name is
@@ -1473,25 +1473,25 @@ logging.files:
   # The name of the files where the logs are written to.
   #name: auditbeat

-  # Configure log file size limit. If limit is reached, log file will be
-  # automatically rotated
+  # Configure log file size limit. If the limit is reached, the log file will be
+  # automatically rotated.
   #rotateeverybytes: 10485760 # = 10MB

-  # Number of rotated log files to keep. Oldest files will be deleted first.
+  # Number of rotated log files to keep. The oldest files will be deleted first.
   #keepfiles: 7

   # The permissions mask to apply when rotating log files. The default value is 0600.
   # Must be a valid Unix-style file permissions mask expressed in octal notation.
   #permissions: 0600

   # Enable log file rotation on time intervals in addition to size-based rotation.
   # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
   # are boundary-aligned with minutes, hours, days, weeks, months, and years as
   # reported by the local system clock. All other intervals are calculated from the
   # Unix epoch. Defaults to disabled.
   #interval: 0

-  # Rotate existing logs on startup rather than appending to the existing
+  # Rotate existing logs on startup rather than appending new data to the existing
   # file. Defaults to true.
   # rotateonstartup: true
@@ -1519,7 +1519,7 @@ logging.files:

   # Array of hosts to connect to.
   # Scheme and port can be left out and will be set to the default (http and 9200)
-  # In case you specify and additional path, the scheme is required: http://localhost:9200/path
+  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
   # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
   #hosts: ["localhost:9200"]
@@ -1566,7 +1566,7 @@ logging.files:
   # Elasticsearch after a network error. The default is 60s.
   #backoff.max: 60s

-  # Configure HTTP request timeout before failing an request to Elasticsearch.
+  # Configure HTTP request timeout before failing a request to Elasticsearch.
   #timeout: 90

   # Use SSL settings for HTTPS.
@@ -1663,15 +1663,15 @@ logging.files:

 # =============================== HTTP Endpoint ================================

-# Each beat can expose internal metrics through a HTTP endpoint. For security
+# Each beat can expose internal metrics through an HTTP endpoint. For security
 # reasons the endpoint is disabled by default. This feature is currently experimental.
-# Stats can be access through http://localhost:5066/stats . For pretty JSON output
+# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
 # append ?pretty to the URL.

 # Defines if the HTTP endpoint is enabled.
 #http.enabled: false

-# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
+# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe.
 # When using IP addresses, it is recommended to only use localhost.
 #http.host: localhost
@@ -1681,7 +1681,7 @@ logging.files:

 # Define which user should be owning the named pipe.
 #http.named_pipe.user:

-# Define which the permissions that should be applied to the named pipe, use the Security
+# Define which permissions should be applied to the named pipe. Use the Security
 # Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with
 # `http.user`.
 #http.named_pipe.security_descriptor:
diff --git a/auditbeat/auditbeat.yml b/auditbeat/auditbeat.yml
index 0a9152658dc..eb92fbf93bc 100644
--- a/auditbeat/auditbeat.yml
+++ b/auditbeat/auditbeat.yml
@@ -61,7 +61,7 @@ setup.template.settings:
 # all the transactions sent by a single shipper in the web interface.
 #name:

 # The tags of the shipper are included in their own field with each
 # transaction published.
 #tags: ["service-X", "web-tier"]
@@ -76,8 +76,8 @@ setup.template.settings:
 # options here or by using the `setup` command.
 #setup.dashboards.enabled: false

-# The URL from where to download the dashboards archive. By default this URL
-# has a value which is computed based on the Beat name and version. For released
+# The URL from which to download the dashboard archive. By default, this URL
+# has a value that is computed based on the Beat name and version. For released
 # versions, this URL points to the dashboard archive on the artifacts.elastic.co
 # website.
 #setup.dashboards.url:
@@ -161,7 +161,7 @@ processors:
 #logging.level: debug

 # At debug level, you can selectively enable logging only for some components.
-# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# To enable all selectors, use ["*"]. Examples of other selectors are "beat",
 # "publisher", "service".
 #logging.selectors: ["*"]
@@ -179,7 +179,7 @@ processors:
 #monitoring.cluster_uuid:

 # Uncomment to send the metrics to Elasticsearch. Most settings from the
-# Elasticsearch output are accepted here as well.
+# Elasticsearch output are also accepted here.
 # Note that the settings should point to your Elasticsearch *monitoring* cluster.
 # Any setting that is not set is automatically inherited from the Elasticsearch
 # output configuration, so if you have the Elasticsearch output configured such
diff --git a/auditbeat/docs/troubleshooting.asciidoc b/auditbeat/docs/troubleshooting.asciidoc
index cd5906710f8..19eb279272b 100644
--- a/auditbeat/docs/troubleshooting.asciidoc
+++ b/auditbeat/docs/troubleshooting.asciidoc
@@ -8,6 +8,7 @@ following tips:

 * <>
 * <>
+* <>
 * <>

//sets block macro for getting-help.asciidoc included in next section
@@ -26,5 +27,15 @@ include::{libbeat-dir}/getting-help.asciidoc[]

 include::{libbeat-dir}/debugging.asciidoc[]

+//sets block macro for metrics-in-logs.asciidoc included in next section
+[id="understand-{beatname_lc}-logs"]
+[role="xpack"]
+== Understand metrics in {beatname_uc} logs
+
+++++
+Understand logged metrics
+++++
+
+include::{libbeat-dir}/metrics-in-logs.asciidoc[]
diff --git a/deploy/kubernetes/auditbeat-kubernetes.yaml b/deploy/kubernetes/auditbeat-kubernetes.yaml
index c7d5beb201e..4983aa9b90c 100644
--- a/deploy/kubernetes/auditbeat-kubernetes.yaml
+++ b/deploy/kubernetes/auditbeat-kubernetes.yaml
@@ -205,7 +205,7 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: auditbeat
-          image: docker.elastic.co/beats/auditbeat:8.9.0
+          image: docker.elastic.co/beats/auditbeat:8.10.0
           args: [
             "-c", "/etc/auditbeat.yml",
             "-e",
diff --git a/deploy/kubernetes/filebeat-kubernetes.yaml b/deploy/kubernetes/filebeat-kubernetes.yaml
index a3a6c88dd18..230d6cd9ca8 100644
--- a/deploy/kubernetes/filebeat-kubernetes.yaml
+++ b/deploy/kubernetes/filebeat-kubernetes.yaml
@@ -167,7 +167,7 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: filebeat
-          image: docker.elastic.co/beats/filebeat:8.9.0
+          image: docker.elastic.co/beats/filebeat:8.10.0
           args: [
             "-c", "/etc/filebeat.yml",
             "-e",
diff
--git a/deploy/kubernetes/heartbeat-kubernetes.yaml b/deploy/kubernetes/heartbeat-kubernetes.yaml index 4fef98d60e8..6b34ee106df 100644 --- a/deploy/kubernetes/heartbeat-kubernetes.yaml +++ b/deploy/kubernetes/heartbeat-kubernetes.yaml @@ -171,7 +171,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: heartbeat - image: docker.elastic.co/beats/heartbeat:8.9.0 + image: docker.elastic.co/beats/heartbeat:8.10.0 args: [ "-c", "/etc/heartbeat.yml", "-e", diff --git a/deploy/kubernetes/metricbeat-kubernetes.yaml b/deploy/kubernetes/metricbeat-kubernetes.yaml index 128ce0454ee..4879f43eecc 100644 --- a/deploy/kubernetes/metricbeat-kubernetes.yaml +++ b/deploy/kubernetes/metricbeat-kubernetes.yaml @@ -290,7 +290,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: metricbeat - image: docker.elastic.co/beats/metricbeat:8.9.0 + image: docker.elastic.co/beats/metricbeat:8.10.0 args: [ "-c", "/etc/metricbeat.yml", "-e", diff --git a/dev-tools/cmd/module_include_list/module_include_list.go b/dev-tools/cmd/module_include_list/module_include_list.go index 79ac5e865df..f3b6c79ad2b 100644 --- a/dev-tools/cmd/module_include_list/module_include_list.go +++ b/dev-tools/cmd/module_include_list/module_include_list.go @@ -30,8 +30,6 @@ import ( "strings" "text/template" - "github.com/pkg/errors" - devtools "github.com/elastic/beats/v7/dev-tools/mage" "github.com/elastic/beats/v7/licenses" ) @@ -212,7 +210,7 @@ func findModuleAndDatasets() ([]string, error) { filepath.Join(moduleDir, "*/*/_meta"), ) if err != nil { - return nil, errors.Wrap(err, "failed finding modules and datasets") + return nil, fmt.Errorf("failed finding modules and datasets: %w", err) } for _, metaDir := range metaDirs { diff --git a/dev-tools/kubernetes/filebeat/Dockerfile.debug b/dev-tools/kubernetes/filebeat/Dockerfile.debug index 661ac535c40..5184138de53 100644 --- a/dev-tools/kubernetes/filebeat/Dockerfile.debug +++ b/dev-tools/kubernetes/filebeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:alpine3.15 as builder +FROM golang:1.20.7 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/dev-tools/kubernetes/heartbeat/Dockerfile.debug b/dev-tools/kubernetes/heartbeat/Dockerfile.debug index cf61a85af40..16defa414fd 100644 --- a/dev-tools/kubernetes/heartbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/heartbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:alpine3.15 as builder +FROM golang:1.20.7 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/dev-tools/kubernetes/metricbeat/Dockerfile.debug b/dev-tools/kubernetes/metricbeat/Dockerfile.debug index 1462bc18b59..24c588d0f24 100644 --- a/dev-tools/kubernetes/metricbeat/Dockerfile.debug +++ b/dev-tools/kubernetes/metricbeat/Dockerfile.debug @@ -1,4 +1,4 @@ -FROM golang:alpine3.15 as builder +FROM golang:1.20.7 as builder ENV PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/go/bin:/usr/local/go/bin diff --git a/dev-tools/mage/build.go b/dev-tools/mage/build.go index ad3d362210c..263299671fd 100644 --- a/dev-tools/mage/build.go +++ b/dev-tools/mage/build.go @@ -18,6 +18,7 @@ package mage import ( + "errors" "fmt" "go/build" "log" @@ -27,7 +28,6 @@ import ( "github.com/josephspurrier/goversioninfo" "github.com/magefile/mage/sh" - "github.com/pkg/errors" ) // BuildArgs are the arguments used for the "build" target and they define how @@ -139,6 +139,18 @@ func GolangCrossBuild(params BuildArgs) error { return err } + // Support projects outside of the beats 
directory. + repoInfo, err := GetProjectRepoInfo() + if err != nil { + return err + } + + // TODO: Support custom build dir/subdir + projectMountPoint := filepath.ToSlash(filepath.Join("/go", "src", repoInfo.CanonicalRootImportPath)) + if err := sh.Run("git", "config", "--global", "--add", "safe.directory", projectMountPoint); err != nil { + return err + } + return Build(params) } @@ -197,7 +209,7 @@ func Build(params BuildArgs) error { log.Println("Generating a .syso containing Windows file metadata.") syso, err := MakeWindowsSysoFile() if err != nil { - return errors.Wrap(err, "failed generating Windows .syso metadata file") + return fmt.Errorf("failed generating Windows .syso metadata file: %w", err) } defer os.Remove(syso) } @@ -250,7 +262,7 @@ func MakeWindowsSysoFile() (string, error) { vi.Walk() sysoFile := BeatName + "_windows_" + GOARCH + ".syso" if err = vi.WriteSyso(sysoFile, GOARCH); err != nil { - return "", errors.Wrap(err, "failed to generate syso file with Windows metadata") + return "", fmt.Errorf("failed to generate syso file with Windows metadata: %w", err) } return sysoFile, nil } diff --git a/dev-tools/mage/check.go b/dev-tools/mage/check.go index c34255420cd..a9547634eb5 100644 --- a/dev-tools/mage/check.go +++ b/dev-tools/mage/check.go @@ -31,9 +31,10 @@ import ( "runtime" "strings" + "errors" + "github.com/magefile/mage/mg" "github.com/magefile/mage/sh" - "github.com/pkg/errors" "github.com/elastic/beats/v7/dev-tools/mage/gotool" "github.com/elastic/beats/v7/libbeat/dashboards" @@ -53,7 +54,7 @@ func Check() error { changes, err := GitDiffIndex() if err != nil { - return errors.Wrap(err, "failed to diff the git index") + return fmt.Errorf("failed to diff the git index: %w", err) } if len(changes) > 0 { @@ -61,7 +62,7 @@ func Check() error { GitDiff() } - return errors.Errorf("some files are not up-to-date. "+ + return fmt.Errorf("some files are not up-to-date. "+ "Run 'make update' then review and commit the changes. "+ "Modified: %v", changes) } @@ -97,7 +98,7 @@ func GitDiffIndex() ([]string, error) { for s.Scan() { m, err := d.Dissect(s.Text()) if err != nil { - return nil, errors.Wrap(err, "failed to dissect git diff-index output") + return nil, fmt.Errorf("failed to dissect git diff-index output: %w", err) } paths := strings.Split(m["paths"], "\t") @@ -151,7 +152,7 @@ func CheckPythonTestNotExecutable() error { } if len(executableTestFiles) > 0 { - return errors.Errorf("python test files cannot be executable because "+ + return fmt.Errorf("python test files cannot be executable because "+ "they will be skipped. Fix permissions of %v", executableTestFiles) } return nil @@ -173,11 +174,11 @@ func CheckYAMLNotExecutable() error { } }) if err != nil { - return errors.Wrap(err, "failed search for YAML files") + return fmt.Errorf("failed search for YAML files: %w", err) } if len(executableYAMLFiles) > 0 { - return errors.Errorf("YAML files cannot be executable. Fix "+ + return fmt.Errorf("YAML files cannot be executable. Fix "+ "permissions of %v", executableYAMLFiles) } @@ -187,7 +188,10 @@ func CheckYAMLNotExecutable() error { // GoVet vets the .go source code using 'go vet'. func GoVet() error { err := sh.RunV("go", "vet", "./...") - return errors.Wrap(err, "failed running go vet, please fix the issues reported") + if err == nil { + return nil + } + return fmt.Errorf("failed running go vet, please fix the issues reported: %w", err) } // CheckLicenseHeaders checks license headers in .go files. 
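Note on the migration pattern used throughout these dev-tools/mage changes: wrapping with fmt.Errorf and the %w verb keeps the cause on the error chain, which is what the removed errors.Wrap calls provided. A minimal, self-contained sketch of the idiom (file name and helper are illustrative, not part of this patch):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// loadSpec wraps the underlying error with %w, the stdlib equivalent of the
// errors.Wrapf calls being replaced in this patch.
func loadSpec(path string) error {
	if _, err := os.Stat(path); err != nil {
		return fmt.Errorf("failed to load spec file %s: %w", path, err)
	}
	return nil
}

func main() {
	err := loadSpec("does-not-exist.yml")
	// errors.Is and errors.As walk the %w chain, so callers can still
	// detect the original cause after wrapping.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true
	var pathErr *fs.PathError
	fmt.Println(errors.As(err, &pathErr)) // true
}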
@@ -203,7 +207,7 @@ func CheckLicenseHeaders() error {
 	case "Elastic", "Elastic License":
 		license = "Elastic"
 	default:
-		return errors.Errorf("unknown license type %v", BeatLicense)
+		return fmt.Errorf("unknown license type %v", BeatLicense)
 	}

 	licenser := gotool.Licenser
@@ -220,14 +224,14 @@ func CheckDashboardsFormat() error {
 		return strings.Contains(filepath.ToSlash(path), dashboardSubDir) && strings.HasSuffix(path, ".json")
 	})
 	if err != nil {
-		return errors.Wrap(err, "failed to find dashboards")
+		return fmt.Errorf("failed to find dashboards: %w", err)
 	}

 	hasErrors := false
 	for _, file := range dashboardFiles {
 		d, err := ioutil.ReadFile(file)
 		if err != nil {
-			return errors.Wrapf(err, "failed to read dashboard file %s", file)
+			return fmt.Errorf("failed to read dashboard file %s: %w", file, err)
 		}

 		if checkDashboardForErrors(file, d) {
@@ -249,7 +253,7 @@ func checkDashboardForErrors(file string, d []byte) bool {
 	var dashboard DashboardObject
 	err := json.Unmarshal(d, &dashboard)
 	if err != nil {
-		fmt.Println(errors.Wrapf(err, "failed to parse dashboard from %s", file).Error())
+		fmt.Printf("failed to parse dashboard from %s: %s\n", file, err)
 		return true
 	}
@@ -314,20 +318,20 @@ func (d *DashboardObject) CheckFormat(module string) error {
 	switch d.Type {
 	case "dashboard":
 		if d.Attributes.Description == "" {
-			return errors.Errorf("empty description on dashboard '%s'", d.Attributes.Title)
+			return fmt.Errorf("empty description on dashboard '%s'", d.Attributes.Title)
 		}
 		if err := checkTitle(dashboardTitleRegexp, d.Attributes.Title, module); err != nil {
-			return errors.Wrapf(err, "expected title with format '[%s Module] Some title', found '%s'", strings.Title(BeatName), d.Attributes.Title)
+			return fmt.Errorf("expected title with format '[%s Module] Some title', found '%s': %w", strings.Title(BeatName), d.Attributes.Title, err)
 		}
 	case "visualization":
 		if err := checkTitle(visualizationTitleRegexp, d.Attributes.Title, module); err != nil {
-			return errors.Wrapf(err, "expected title with format 'Some title [%s Module]', found '%s'", strings.Title(BeatName), d.Attributes.Title)
+			return fmt.Errorf("expected title with format 'Some title [%s Module]', found '%s': %w", strings.Title(BeatName), d.Attributes.Title, err)
 		}
 	}

 	expectedIndexPattern := strings.ToLower(BeatName) + "-*"
 	if err := checkDashboardIndexPattern(expectedIndexPattern, d); err != nil {
-		return errors.Wrapf(err, "expected index pattern reference '%s'", expectedIndexPattern)
+		return fmt.Errorf("expected index pattern reference '%s': %w", expectedIndexPattern, err)
 	}
 	return nil
 }
@@ -339,7 +343,7 @@ func checkTitle(re *regexp.Regexp, title string, module string) error {
 	}
 	beatTitle := strings.Title(BeatName)
 	if match[1] != beatTitle {
-		return errors.Errorf("expected: '%s', found: '%s'", beatTitle, match[1])
+		return fmt.Errorf("expected: '%s', found: '%s'", beatTitle, match[1])
 	}

 	// Compare case insensitive, and ignore spaces and underscores in module names
@@ -347,7 +351,7 @@ func checkTitle(re *regexp.Regexp, title string, module string) error {
 	replacer := strings.NewReplacer(" ", "", "_", "")
 	expectedModule := replacer.Replace(strings.ToLower(module))
 	foundModule := replacer.Replace(strings.ToLower(match[2]))
 	if expectedModule != foundModule {
-		return errors.Errorf("expected module name (%s), found '%s'", module, match[2])
+		return fmt.Errorf("expected module name (%s), found '%s'", module, match[2])
 	}
 	return nil
 }

@@ -355,22 +359,22 @@ func checkDashboardIndexPattern(expectedIndex string,
o *DashboardObject) error { if objectMeta := o.Attributes.KibanaSavedObjectMeta; objectMeta != nil { if index := objectMeta.SearchSourceJSON.Index; index != nil && *index != expectedIndex { - return errors.Errorf("unexpected index pattern reference found in object meta: `%s` in visualization `%s`", *index, o.Attributes.Title) + return fmt.Errorf("unexpected index pattern reference found in object meta: `%s` in visualization `%s`", *index, o.Attributes.Title) } } if visState := o.Attributes.VisState; visState != nil { for _, control := range visState.Params.Controls { if index := control.IndexPattern; index != nil && *index != expectedIndex { - return errors.Errorf("unexpected index pattern reference found in visualization state: `%s` in visualization `%s`", *index, o.Attributes.Title) + return fmt.Errorf("unexpected index pattern reference found in visualization state: `%s` in visualization `%s`", *index, o.Attributes.Title) } } if index := visState.Params.IndexPattern; index != nil && *index != expectedIndex { - return errors.Errorf("unexpected index pattern reference found in visualization state params: `%s` in visualization `%s`", *index, o.Attributes.Title) + return fmt.Errorf("unexpected index pattern reference found in visualization state params: `%s` in visualization `%s`", *index, o.Attributes.Title) } } for _, reference := range o.References { if reference.Type == "index-pattern" && reference.ID != expectedIndex { - return errors.Errorf("unexpected reference to index pattern `%s`", reference.ID) + return fmt.Errorf("unexpected reference to index pattern `%s`", reference.ID) } } return nil diff --git a/dev-tools/mage/common.go b/dev-tools/mage/common.go index 0647a13a59a..1c1ca25d95b 100644 --- a/dev-tools/mage/common.go +++ b/dev-tools/mage/common.go @@ -29,6 +29,7 @@ import ( "debug/elf" "encoding/hex" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -50,7 +51,6 @@ import ( "github.com/magefile/mage/mg" "github.com/magefile/mage/sh" "github.com/magefile/mage/target" - "github.com/pkg/errors" ) // Expand expands the given Go text/template string. @@ -91,17 +91,17 @@ func expandTemplate(name, tmpl string, funcs template.FuncMap, args ...map[strin t, err := t.Parse(tmpl) if err != nil { if name == "inline" { - return "", errors.Wrapf(err, "failed to parse template '%v'", tmpl) + return "", fmt.Errorf("failed to parse template '%v': %w", tmpl, err) } - return "", errors.Wrap(err, "failed to parse template") + return "", fmt.Errorf("failed to parse template: %w", err) } buf := new(bytes.Buffer) if err := t.Execute(buf, joinMaps(args...)); err != nil { if name == "inline" { - return "", errors.Wrapf(err, "failed to expand template '%v'", tmpl) + return "", fmt.Errorf("failed to expand template '%v': %w", tmpl, err) } - return "", errors.Wrap(err, "failed to expand template") + return "", fmt.Errorf("failed to expand template: %w", err) } return buf.String(), nil @@ -127,7 +127,7 @@ func joinMaps(args ...map[string]interface{}) map[string]interface{} { func expandFile(src, dst string, args ...map[string]interface{}) error { tmplData, err := ioutil.ReadFile(src) if err != nil { - return errors.Wrapf(err, "failed reading from template %v", src) + return fmt.Errorf("failed reading from template %v: %w", src, err) } output, err := expandTemplate(src, string(tmplData), FuncMap, args...) 
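The expandTemplate and expandFile helpers changed above are thin wrappers around text/template. A minimal sketch of that parse-then-execute flow, with illustrative template text and arguments (not taken from the repo):

package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// Parse an inline template with a FuncMap, then execute it against a
	// map of arguments, mirroring expandTemplate's two steps.
	funcs := template.FuncMap{"upper": strings.ToUpper}
	tmpl, err := template.New("inline").Funcs(funcs).Parse("Beat: {{ upper .BeatName }}\n")
	if err != nil {
		panic(err) // expandTemplate reports this as "failed to parse template '...'"
	}
	args := map[string]interface{}{"BeatName": "auditbeat"}
	if err := tmpl.Execute(os.Stdout, args); err != nil {
		panic(err) // reported as "failed to expand template '...'"
	}
}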
@@ -141,7 +141,7 @@ func expandFile(src, dst string, args ...map[string]interface{}) error { } if err = ioutil.WriteFile(createDir(dst), []byte(output), 0644); err != nil { - return errors.Wrap(err, "failed to write rendered template") + return fmt.Errorf("failed to write rendered template: %w", err) } return nil @@ -151,7 +151,7 @@ func expandFile(src, dst string, args ...map[string]interface{}) error { func CWD(elem ...string) string { wd, err := os.Getwd() if err != nil { - panic(errors.Wrap(err, "failed to get the CWD")) + panic(fmt.Errorf("failed to get the CWD: %w", err)) } return filepath.Join(append([]string{wd}, elem...)...) } @@ -188,7 +188,7 @@ func (info *DockerInfo) IsBoot2Docker() bool { // HaveDocker returns an error if docker is unavailable. func HaveDocker() error { if _, err := GetDockerInfo(); err != nil { - return errors.Wrap(err, "docker is not available") + return fmt.Errorf("docker is not available: %w", err) } return nil } @@ -274,7 +274,7 @@ func FindReplace(file string, re *regexp.Regexp, repl string) error { // MustFindReplace invokes FindReplace and panics if an error occurs. func MustFindReplace(file string, re *regexp.Regexp, repl string) { if err := FindReplace(file, re, repl); err != nil { - panic(errors.Wrap(err, "failed to find and replace")) + panic(fmt.Errorf("failed to find and replace: %w", err)) } } @@ -285,23 +285,23 @@ func DownloadFile(url, destinationDir string) (string, error) { resp, err := http.Get(url) if err != nil { - return "", errors.Wrap(err, "http get failed") + return "", fmt.Errorf("http get failed: %w", err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return "", errors.Errorf("download failed with http status: %v", resp.StatusCode) + return "", fmt.Errorf("download failed with http status: %v", resp.StatusCode) } name := filepath.Join(destinationDir, filepath.Base(url)) f, err := os.Create(createDir(name)) if err != nil { - return "", errors.Wrap(err, "failed to create output file") + return "", fmt.Errorf("failed to create output file: %w", err) } defer f.Close() if _, err = io.Copy(f, resp.Body); err != nil { - return "", errors.Wrap(err, "failed to write file") + return "", fmt.Errorf("failed to write file: %w", err) } return name, f.Close() @@ -316,7 +316,7 @@ func Extract(sourceFile, destinationDir string) error { case ext == ".zip": return unzip(sourceFile, destinationDir) default: - return errors.Errorf("failed to extract %v, unhandled file extension", sourceFile) + return fmt.Errorf("failed to extract %v, unhandled file extension", sourceFile) } } @@ -340,7 +340,7 @@ func unzip(sourceFile, destinationDir string) error { path := filepath.Join(destinationDir, f.Name) if !strings.HasPrefix(path, destinationDir) { - return errors.Errorf("illegal file path in zip: %v", f.Name) + return fmt.Errorf("illegal file path in zip: %v", f.Name) } if f.FileInfo().IsDir() { @@ -485,7 +485,7 @@ func untar(sourceFile, destinationDir string) error { path := filepath.Join(destinationDir, header.Name) if !strings.HasPrefix(path, destinationDir) { - return errors.Errorf("illegal file path in tar: %v", header.Name) + return fmt.Errorf("illegal file path in tar: %v", header.Name) } switch header.Typeflag { @@ -511,7 +511,7 @@ func untar(sourceFile, destinationDir string) error { return err } default: - return errors.Errorf("unable to untar type=%c in file=%s", header.Typeflag, path) + return fmt.Errorf("unable to untar type=%c in file=%s", header.Typeflag, path) } } @@ -613,7 +613,7 @@ func ParallelCtx(ctx context.Context, fns 
...interface{}) {
 	wg.Wait()

 	if len(errs) > 0 {
-		panic(errors.Errorf(strings.Join(errs, "\n")))
+		panic(errors.New(strings.Join(errs, "\n")))
 	}
 }
@@ -652,7 +652,7 @@ func FindFiles(globs ...string) ([]string, error) {
 	for _, glob := range globs {
 		files, err := filepath.Glob(glob)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed on glob %v", glob)
+			return nil, fmt.Errorf("failed on glob %v: %w", glob, err)
 		}
 		configFiles = append(configFiles, files...)
 	}
@@ -691,7 +691,7 @@ func FindFilesRecursive(match func(path string, info os.FileInfo) bool) ([]strin
 func FileConcat(out string, perm os.FileMode, files ...string) error {
 	f, err := os.OpenFile(createDir(out), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, perm)
 	if err != nil {
-		return errors.Wrap(err, "failed to create file")
+		return fmt.Errorf("failed to create file: %w", err)
 	}
 	defer f.Close()
@@ -735,20 +735,20 @@ func MustFileConcat(out string, perm os.FileMode, files ...string) {
 func VerifySHA256(file string, hash string) error {
 	f, err := os.Open(file)
 	if err != nil {
-		return errors.Wrap(err, "failed to open file for sha256 verification")
+		return fmt.Errorf("failed to open file for sha256 verification: %w", err)
 	}
 	defer f.Close()

 	sum := sha256.New()
 	if _, err := io.Copy(sum, f); err != nil {
-		return errors.Wrap(err, "failed reading from input file")
+		return fmt.Errorf("failed reading from input file: %w", err)
 	}

 	computedHash := hex.EncodeToString(sum.Sum(nil))
 	expectedHash := strings.TrimSpace(hash)

 	if computedHash != expectedHash {
-		return errors.Errorf("SHA256 verification of %v failed. Expected=%v, "+
+		return fmt.Errorf("SHA256 verification of %v failed. Expected=%v, "+
 			"but computed=%v", f.Name(), expectedHash, computedHash)
 	}
 	log.Println("SHA256 OK:", f.Name())
@@ -761,13 +761,13 @@ func VerifySHA256(file string, hash string) error {
 func CreateSHA512File(file string) error {
 	f, err := os.Open(file)
 	if err != nil {
-		return errors.Wrap(err, "failed to open file for sha512 summing")
+		return fmt.Errorf("failed to open file for sha512 summing: %w", err)
 	}
 	defer f.Close()

 	sum := sha512.New()
 	if _, err := io.Copy(sum, f); err != nil {
-		return errors.Wrap(err, "failed reading from input file")
+		return fmt.Errorf("failed reading from input file: %w", err)
 	}

 	computedHash := hex.EncodeToString(sum.Sum(nil))
@@ -856,7 +856,7 @@ func XPackBeatDir(path ...string) string {
 func LibbeatDir(path ...string) string {
 	esBeatsDir, err := ElasticBeatsDir()
 	if err != nil {
-		panic(errors.Wrap(err, "failed determine libbeat dir location"))
+		panic(fmt.Errorf("failed to determine libbeat dir location: %w", err))
 	}

 	return filepath.Join(append([]string{esBeatsDir, "libbeat"}, path...)...)
@@ -873,7 +873,7 @@ func CreateDir(file string) string {
 	// Create the output directory.
 	if dir := filepath.Dir(file); dir != "."
{ if err := os.MkdirAll(dir, 0755); err != nil { - panic(errors.Wrapf(err, "failed to create parent dir for %v", file)) + panic(fmt.Errorf("failed to create parent dir for %v: %w", file, err)) } } return file @@ -895,7 +895,7 @@ func ParseVersion(version string) (major, minor, patch int, err error) { names := parseVersionRegex.SubexpNames() matches := parseVersionRegex.FindStringSubmatch(version) if len(matches) == 0 { - err = errors.Errorf("failed to parse version '%v'", version) + err = fmt.Errorf("failed to parse version '%v'", version) return } diff --git a/dev-tools/mage/config.go b/dev-tools/mage/config.go index adfa4c74541..822e7f0f163 100644 --- a/dev-tools/mage/config.go +++ b/dev-tools/mage/config.go @@ -19,6 +19,7 @@ package mage import ( "bytes" + "errors" "fmt" "io/ioutil" "os" @@ -30,7 +31,6 @@ import ( "github.com/magefile/mage/mg" - "github.com/pkg/errors" "gopkg.in/yaml.v2" ) @@ -98,7 +98,7 @@ func Config(types ConfigFileType, args ConfigFileParams, targetDir string) error if types.IsShort() { file := filepath.Join(targetDir, BeatName+".yml") if err := makeConfigTemplate(file, 0600, args, ShortConfigType); err != nil { - return errors.Wrap(err, "failed making short config") + return fmt.Errorf("failed making short config: %w", err) } } @@ -106,7 +106,7 @@ func Config(types ConfigFileType, args ConfigFileParams, targetDir string) error if types.IsReference() { file := filepath.Join(targetDir, BeatName+".reference.yml") if err := makeConfigTemplate(file, 0644, args, ReferenceConfigType); err != nil { - return errors.Wrap(err, "failed making reference config") + return fmt.Errorf("failed making reference config: %w", err) } } @@ -114,7 +114,7 @@ func Config(types ConfigFileType, args ConfigFileParams, targetDir string) error if types.IsDocker() { file := filepath.Join(targetDir, BeatName+".docker.yml") if err := makeConfigTemplate(file, 0600, args, DockerConfigType); err != nil { - return errors.Wrap(err, "failed making docker config") + return fmt.Errorf("failed making docker config: %w", err) } } @@ -136,7 +136,7 @@ func makeConfigTemplate(destination string, mode os.FileMode, confParams ConfigF confFile = confParams.Docker tmplParams = map[string]interface{}{"Docker": true} default: - panic(errors.Errorf("Invalid config file type: %v", typ)) + panic(fmt.Errorf("Invalid config file type: %v", typ)) } // Build the dependencies. 
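One detail worth spelling out for these conversions: %w and %v in fmt.Errorf produce the same message text, but only %w keeps the wrapped error reachable on the chain. A short standalone sketch of the difference (strings are illustrative):

package main

import (
	"errors"
	"fmt"
)

func main() {
	cause := errors.New("boom")
	wrapped := fmt.Errorf("failed making short config: %w", cause)
	opaque := fmt.Errorf("failed making short config: %v", cause)
	// Identical text, different chains: only the %w form can be unwrapped.
	fmt.Println(wrapped.Error() == opaque.Error()) // true
	fmt.Println(errors.Unwrap(wrapped) == cause)   // true
	fmt.Println(errors.Unwrap(opaque) == nil)      // true
}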
@@ -192,18 +192,18 @@ func makeConfigTemplate(destination string, mode os.FileMode, confParams ConfigF
 	var err error
 	for _, templateGlob := range confParams.Templates {
 		if tmpl, err = tmpl.ParseGlob(templateGlob); err != nil {
-			return errors.Wrapf(err, "failed to parse config templates in %q", templateGlob)
+			return fmt.Errorf("failed to parse config templates in %q: %w", templateGlob, err)
 		}
 	}

 	data, err := ioutil.ReadFile(confFile.Template)
 	if err != nil {
-		return errors.Wrapf(err, "failed to read config template %q", confFile.Template)
+		return fmt.Errorf("failed to read config template %q: %w", confFile.Template, err)
 	}

 	tmpl, err = tmpl.Parse(string(data))
 	if err != nil {
-		return errors.Wrap(err, "failed to parse template")
+		return fmt.Errorf("failed to parse template: %w", err)
 	}

 	out, err := os.OpenFile(CreateDir(destination), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, mode)
@@ -213,7 +213,7 @@ func makeConfigTemplate(destination string, mode os.FileMode, confParams ConfigF
 	defer out.Close()

 	if err = tmpl.Execute(out, EnvMap(params)); err != nil {
-		return errors.Wrapf(err, "failed building %v", destination)
+		return fmt.Errorf("failed building %v: %w", destination, err)
 	}

 	return nil
diff --git a/dev-tools/mage/copy.go b/dev-tools/mage/copy.go
index c4774539871..fc9aa854823 100644
--- a/dev-tools/mage/copy.go
+++ b/dev-tools/mage/copy.go
@@ -18,13 +18,12 @@ package mage

 import (
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 	"regexp"
-
-	"github.com/pkg/errors"
 )

 // Copy copies a file or a directory (recursively) and preserves the permissions.
@@ -38,7 +37,7 @@ func CopyFile(src, dest string) error {
 	copy := &CopyTask{Source: src, Dest: dest}
 	info, err := os.Stat(src)
 	if err != nil {
-		return errors.Wrapf(err, "copy failed: cannot stat source file %v", src)
+		return fmt.Errorf("copy failed: cannot stat source file %v: %w", src, err)
 	}
 	return copy.fileCopy(src, dest, info)
 }
@@ -56,22 +55,27 @@ type CopyTask struct {

-// Execute executes the copy and returns an error of there is a failure.
+// Execute executes the copy and returns an error if there is a failure.
func (t *CopyTask) Execute() error { if err := t.init(); err != nil { - return errors.Wrap(err, "copy failed") + return fmt.Errorf("copy failed: %w", err) } info, err := os.Stat(t.Source) if err != nil { - return errors.Wrapf(err, "copy failed: cannot stat source file %v", t.Source) + return fmt.Errorf("copy failed: cannot stat source file %v: %w", t.Source, err) } - return errors.Wrap(t.recursiveCopy(t.Source, t.Dest, info), "copy failed") + err = t.recursiveCopy(t.Source, t.Dest, info) + if err != nil { + return fmt.Errorf("copy failed: %w", err) + } + + return nil } func (t *CopyTask) init() error { for _, excl := range t.Exclude { re, err := regexp.Compile(excl) if err != nil { - return errors.Wrapf(err, "bad exclude pattern %v", excl) + return fmt.Errorf("bad exclude pattern %v: %w", excl, err) } t.excludes = append(t.excludes, re) } @@ -106,7 +110,7 @@ func (t *CopyTask) fileCopy(src, dest string, info os.FileInfo) error { defer srcFile.Close() if !info.Mode().IsRegular() { - return errors.Errorf("failed to copy source file because it is not a " + + return fmt.Errorf("failed to copy source file because it is not a " + "regular file") } @@ -137,19 +141,19 @@ func (t *CopyTask) dirCopy(src, dest string, info os.FileInfo) error { mode = info.Mode() } if err := os.MkdirAll(dest, mode&os.ModePerm); err != nil { - return errors.Wrap(err, "failed creating dirs") + return fmt.Errorf("failed creating dirs: %w", err) } contents, err := ioutil.ReadDir(src) if err != nil { - return errors.Wrapf(err, "failed to read dir %v", src) + return fmt.Errorf("failed to read dir %v: %w", src, err) } for _, info := range contents { srcFile := filepath.Join(src, info.Name()) destFile := filepath.Join(dest, info.Name()) if err = t.recursiveCopy(srcFile, destFile, info); err != nil { - return errors.Wrapf(err, "failed to copy %v to %v", srcFile, destFile) + return fmt.Errorf("failed to copy %v to %v: %w", srcFile, destFile, err) } } diff --git a/dev-tools/mage/dockerbuilder.go b/dev-tools/mage/dockerbuilder.go index b9c48c2c35f..2066670dc80 100644 --- a/dev-tools/mage/dockerbuilder.go +++ b/dev-tools/mage/dockerbuilder.go @@ -20,6 +20,7 @@ package mage import ( "bytes" "compress/gzip" + "errors" "fmt" "io" "os" @@ -29,7 +30,6 @@ import ( "time" "github.com/magefile/mage/sh" - "github.com/pkg/errors" ) type dockerBuilder struct { @@ -59,7 +59,7 @@ func newDockerBuilder(spec PackageSpec) (*dockerBuilder, error) { func (b *dockerBuilder) Build() error { if err := os.RemoveAll(b.buildDir); err != nil { - return errors.Wrapf(err, "failed to clean existing build directory %s", b.buildDir) + return fmt.Errorf("failed to clean existing build directory %s: %w", b.buildDir, err) } if err := b.copyFiles(); err != nil { @@ -67,7 +67,7 @@ func (b *dockerBuilder) Build() error { } if err := b.prepareBuild(); err != nil { - return errors.Wrap(err, "failed to prepare build") + return fmt.Errorf("failed to prepare build: %w", err) } tag, err := b.dockerBuild() @@ -80,11 +80,11 @@ func (b *dockerBuilder) Build() error { tries -= 1 } if err != nil { - return errors.Wrap(err, "failed to build docker") + return fmt.Errorf("failed to build docker: %w", err) } if err := b.dockerSave(tag); err != nil { - return errors.Wrap(err, "failed to save docker as artifact") + return fmt.Errorf("failed to save docker as artifact: %w", err) } return nil @@ -114,7 +114,7 @@ func (b *dockerBuilder) copyFiles() error { if f.SkipOnMissing && errors.Is(err, os.ErrNotExist) { continue } - return errors.Wrapf(err, "failed to copy from %s to %s", 
f.Source, target)
+			return fmt.Errorf("failed to copy from %s to %s: %w", f.Source, target, err)
 		}
 	}
 	return nil
@@ -141,7 +141,7 @@ func (b *dockerBuilder) prepareBuild() error {

 			err = b.ExpandFile(path, target, data)
 			if err != nil {
-				return errors.Wrapf(err, "expanding template '%s' to '%s'", path, target)
+				return fmt.Errorf("expanding template '%s' to '%s': %w", path, target, err)
 			}
 		}
 	}
 	return nil
@@ -182,7 +182,7 @@ func (b *dockerBuilder) expandDockerfile(templatesDir string, data map[string]in
 		path := filepath.Join(templatesDir, file.source)
 		err := b.ExpandFile(path, target, data)
 		if err != nil {
-			return errors.Wrapf(err, "expanding template '%s' to '%s'", path, target)
+			return fmt.Errorf("expanding template '%s' to '%s': %w", path, target, err)
 		}
 	}
@@ -251,9 +251,13 @@ func (b *dockerBuilder) dockerSave(tag string) error {
 	if err = cmd.Wait(); err != nil {
 		if errmsg := strings.TrimSpace(stderr.String()); errmsg != "" {
-			err = errors.Wrap(errors.New(errmsg), err.Error())
+			err = fmt.Errorf("%s: %w", err, errors.New(errmsg))
 		}
 		return err
 	}
-	return errors.Wrap(CreateSHA512File(outputFile), "failed to create .sha512 file")
+
+	if err = CreateSHA512File(outputFile); err != nil {
+		return fmt.Errorf("failed to create .sha512 file: %w", err)
+	}
+	return nil
 }
diff --git a/dev-tools/mage/fields.go b/dev-tools/mage/fields.go
index aff1a3be28c..b90e4e22fa5 100644
--- a/dev-tools/mage/fields.go
+++ b/dev-tools/mage/fields.go
@@ -18,10 +18,9 @@
 package mage

 import (
+	"fmt"
 	"path/filepath"

-	"github.com/pkg/errors"
-
 	"github.com/magefile/mage/sh"
 )
@@ -229,6 +228,6 @@ func toLibbeatLicenseName(name string) string {
 	case "Elastic License":
 		return "Elastic"
 	default:
-		panic(errors.Errorf("invalid license name '%v'", name))
+		panic(fmt.Errorf("invalid license name '%v'", name))
 	}
 }
diff --git a/dev-tools/mage/gomod.go b/dev-tools/mage/gomod.go
index 11ad159b841..de67f94797a 100644
--- a/dev-tools/mage/gomod.go
+++ b/dev-tools/mage/gomod.go
@@ -18,11 +18,10 @@
 package mage

 import (
+	"fmt"
 	"os"
 	"path/filepath"

-	"github.com/pkg/errors"
-
 	"github.com/elastic/beats/v7/dev-tools/mage/gotool"
 )
@@ -38,7 +37,7 @@ func CopyFilesToVendor(vendorFolder string, modulesToCopy []CopyModule) error {
 	for _, p := range modulesToCopy {
 		path, err := gotool.ListModuleCacheDir(p.Name)
 		if err != nil {
-			return errors.Wrapf(err, "error while looking up cached dir of module: %s", p.Name)
+			return fmt.Errorf("error while looking up cached dir of module: %s: %w", p.Name, err)
 		}

 		for _, f := range p.FilesToCopy {
@@ -47,7 +46,7 @@ func CopyFilesToVendor(vendorFolder string, modulesToCopy []CopyModule) error {
 			copyTask := &CopyTask{Source: from, Dest: to, Mode: 0600, DirMode: os.ModeDir | 0750}
 			err = copyTask.Execute()
 			if err != nil {
-				return errors.Wrapf(err, "error while copying file from %s to %s", from, to)
+				return fmt.Errorf("error while copying file from %s to %s: %w", from, to, err)
 			}
 		}
 	}
diff --git a/dev-tools/mage/gotest.go b/dev-tools/mage/gotest.go
index 64a658ee77f..082d9748a39 100644
--- a/dev-tools/mage/gotest.go
+++ b/dev-tools/mage/gotest.go
@@ -138,7 +138,7 @@ func DefaultTestBinaryArgs() TestBinaryArgs {
 }

 // GoTestIntegrationForModule executes the Go integration tests sequentially.
-// Currently all test cases must be present under "./module" directory.
+// Currently, all test cases must be present under the "./module" directory.
 //
-// Motivation: previous implementation executed all integration tests at once,
-// causing high CPU load, high memory usage and resulted in timeouts.
+// Motivation: the previous implementation executed all integration tests at once,
+// causing high CPU load, high memory usage, and timeouts.
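The gotest.go hunk below derives the HTML report path from the coverage profile before invoking the cover tool. A standalone sketch of that path logic (paths are illustrative, not from the repo):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// htmlCoverReportPath mirrors the TrimSuffix/Ext expression added below:
// "build/coverage.out" becomes "build/coverage.html".
func htmlCoverReportPath(profile string) string {
	return strings.TrimSuffix(profile, filepath.Ext(profile)) + ".html"
}

func main() {
	fmt.Println(htmlCoverReportPath("build/coverage.out")) // build/coverage.html
	// GoTest then runs: go tool cover -html=<profile> -o <report>, which
	// requires a Go toolchain and an existing coverage profile at run time.
}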
@@ -307,12 +307,15 @@ func GoTest(ctx context.Context, params GoTestArgs) error { // Generate a HTML code coverage report. var htmlCoverReport string if params.CoverageProfileFile != "" { + htmlCoverReport = strings.TrimSuffix(params.CoverageProfileFile, filepath.Ext(params.CoverageProfileFile)) + ".html" + coverToHTML := sh.RunCmd("go", "tool", "cover", "-html="+params.CoverageProfileFile, "-o", htmlCoverReport) - if err = coverToHTML(); err != nil { + + if err := coverToHTML(); err != nil { return fmt.Errorf("failed to write HTML code coverage report: %w", err) } } diff --git a/dev-tools/mage/gotest_test.go b/dev-tools/mage/gotest_test.go index edbb1e549f9..49784c0fd6c 100644 --- a/dev-tools/mage/gotest_test.go +++ b/dev-tools/mage/gotest_test.go @@ -210,41 +210,41 @@ var wantTestAssertOutput = `(?sm: Error Trace: gotest_test.go:\d+.* Error: Should be true.* Test: TestGoTest_Helper_AssertOutput/assert_fails.* - --- FAIL: TestGoTest_Helper_AssertOutput/assert_fails .* + === FAIL: dev-tools/mage TestGoTest_Helper_AssertOutput/assert_with_message .* gotest_test.go:\d+:.* Error Trace: gotest_test.go:\d+.* Error: Should be true.* Test: TestGoTest_Helper_AssertOutput/assert_with_message.* Messages: My message.* - --- FAIL: TestGoTest_Helper_AssertOutput/assert_with_message .* + === FAIL: dev-tools/mage TestGoTest_Helper_AssertOutput/assert_with_messagef .* gotest_test.go:\d+:.* Error Trace: gotest_test.go:\d+.* Error: Should be true.* Test: TestGoTest_Helper_AssertOutput/assert_with_messagef.* Messages: My message with arguments: 42.* - --- FAIL: TestGoTest_Helper_AssertOutput/assert_with_messagef .* + === FAIL: dev-tools/mage TestGoTest_Helper_AssertOutput/require_fails .* gotest_test.go:\d+:.* Error Trace: gotest_test.go:\d+.* Error: Should be true.* Test: TestGoTest_Helper_AssertOutput/require_fails.* - --- FAIL: TestGoTest_Helper_AssertOutput/require_fails .* + === FAIL: dev-tools/mage TestGoTest_Helper_AssertOutput/require_with_message .* gotest_test.go:\d+:.* Error Trace: gotest_test.go:\d+.* Error: Should be true.* Test: TestGoTest_Helper_AssertOutput/require_with_message.* Messages: My message.* - --- FAIL: TestGoTest_Helper_AssertOutput/require_with_message .* + === FAIL: dev-tools/mage TestGoTest_Helper_AssertOutput/require_with_messagef .* gotest_test.go:\d+:.* Error Trace: gotest_test.go:\d+.* Error: Should be true.* Test: TestGoTest_Helper_AssertOutput/require_with_messagef.* Messages: My message with arguments: 42.* - --- FAIL: TestGoTest_Helper_AssertOutput/require_with_messagef .* + === FAIL: dev-tools/mage TestGoTest_Helper_AssertOutput/equals_map .* gotest_test.go:\d+:.* Error Trace: gotest_test.go:\d+.* @@ -306,17 +306,17 @@ var wantTestLogOutput = `(?sm: gotest_test.go:\d+: printf style log message: 42.* gotest_test.go:\d+: Log should fail.* gotest_test.go:\d+: Log should fail with printf style log: 23.* - --- FAIL: TestGoTest_Helper_LogOutput/on_error.* + === FAIL: dev-tools/mage TestGoTest_Helper_LogOutput/on_fatal.* gotest_test.go:\d+: Log message should be printed.* gotest_test.go:\d+: printf style log message: 42.* gotest_test.go:\d+: Log should fail.* - --- FAIL: TestGoTest_Helper_LogOutput/on_fatal.* + === FAIL: dev-tools/mage TestGoTest_Helper_LogOutput/on_fatalf.* gotest_test.go:\d+: Log message should be printed.* gotest_test.go:\d+: printf style log message: 42.* gotest_test.go:\d+: Log should fail with printf style log: 42.* - --- FAIL: TestGoTest_Helper_LogOutput/on_fatalf.* + === FAIL: dev-tools/mage TestGoTest_Helper_LogOutput/with_newlines.* 
 gotest_test.go:\d+: Log.*
 message.*
@@ -336,7 +336,7 @@ var wantTestLogOutput = `(?sm:
 style.*
 log:.*
 42.*
---- FAIL: TestGoTest_Helper_LogOutput/with_newlines.*
+=== FAIL: dev-tools/mage TestGoTest_Helper_LogOutput.*
 DONE 5 tests, 5 failures in.*
)`
diff --git a/dev-tools/mage/keychain.go b/dev-tools/mage/keychain.go
index c66bec16d35..4435604ea57 100644
--- a/dev-tools/mage/keychain.go
+++ b/dev-tools/mage/keychain.go
@@ -18,6 +18,7 @@
 package mage

 import (
+	"fmt"
 	"log"
 	"regexp"
 	"strconv"
@@ -25,7 +26,6 @@ import (
 	"sync"

 	"github.com/magefile/mage/sh"
-	"github.com/pkg/errors"
 )

 var _appleKeychain = &appleKeychain{}
@@ -138,19 +138,19 @@ func getAppleSigningInfo() (*AppleSigningInfo, error) {
 	}

 	if len(install) > 1 {
-		return nil, errors.Errorf("found multiple installer signing identities "+
+		return nil, fmt.Errorf("found multiple installer signing identities "+
 			"that match '%v'. Set a more specific APPLE_SIGNING_IDENTITY_INSTALLER "+
 			"value that will select one of %+v", identityInstaller, install)
 	}

 	if len(app) > 1 {
-		return nil, errors.Errorf("found multiple installer signing identities "+
+		return nil, fmt.Errorf("found multiple app signing identities "+
 			"that match '%v'. Set a more specific APPLE_SIGNING_IDENTITY_APP "+
 			"value that will select one of %+v", identityApp, app)
 	}

 	if len(install) == 0 || len(app) == 0 {
-		return nil, errors.Errorf("apple signing was requested with " +
+		return nil, fmt.Errorf("apple signing was requested with " +
 			"APPLE_SIGNING_ENABLED=true, but the required signing identities " +
 			"for app and installer were not found")
 	}
diff --git a/dev-tools/mage/kibana.go b/dev-tools/mage/kibana.go
index a556e5b5429..47e67a24a50 100644
--- a/dev-tools/mage/kibana.go
+++ b/dev-tools/mage/kibana.go
@@ -18,11 +18,11 @@
 package mage

 import (
+	"errors"
+	"fmt"
 	"io/fs"
 	"os"
 	"path/filepath"
-
-	"github.com/pkg/errors"
 )

 const kibanaBuildDir = "build/kibana"
@@ -81,7 +81,7 @@ func PackageKibanaDashboardsFromBuildDir() {
 		case Deb, RPM:
 			pkgArgs.Spec.ReplaceFile("/usr/share/{{.BeatName}}/kibana", kibanaDashboards)
 		default:
-			panic(errors.Errorf("unhandled package type: %v", pkgType))
+			panic(fmt.Errorf("unhandled package type: %v", pkgType))
 		}
 		break
 	}
diff --git a/dev-tools/mage/kubernetes/kuberemote.go b/dev-tools/mage/kubernetes/kuberemote.go
index 3bb540a42da..8e9d9897d44 100644
--- a/dev-tools/mage/kubernetes/kuberemote.go
+++ b/dev-tools/mage/kubernetes/kuberemote.go
@@ -35,7 +35,6 @@ import (
 	"strings"
 	"time"

-	"github.com/pkg/errors"
 	"golang.org/x/crypto/ssh"

 	apiv1 "k8s.io/api/core/v1"
@@ -97,7 +96,7 @@ func NewKubeRemote(kubeconfig string, namespace string, name string, workDir str
 // Run runs the command remotely on the kubernetes cluster.
 func (r *KubeRemote) Run(env map[string]string, stdout io.Writer, stderr io.Writer, args ...string) error {
 	if err := r.syncSSHKey(); err != nil {
-		return errors.Wrap(err, "failed to sync SSH secret")
+		return fmt.Errorf("failed to sync SSH secret: %w", err)
 	}
 	defer r.deleteSSHKey()
 	if err := r.syncServiceAccount(); err != nil {
@@ -106,21 +105,21 @@ func (r *KubeRemote) Run(env map[string]string, stdout io.Writer, stderr io.Writ
 	defer r.deleteServiceAccount()
 	_, err := r.createPod(env, args...)
 	if err != nil {
-		return errors.Wrap(err, "failed to create execute pod")
+		return fmt.Errorf("failed to create execute pod: %w", err)
 	}
 	defer r.deletePod()

 	// wait for SSH to be up inside the init container.
 	_, err = r.waitForPod(5*time.Minute, podInitReady)
 	if err != nil {
-		return errors.Wrap(err, "execute pod init container never started")
+		return fmt.Errorf("execute pod init container never started: %w", err)
 	}
 	time.Sleep(1 * time.Second) // SSH inside of container can take a moment

-	// forward the SSH port so rsync can be ran.
+	// forward the SSH port so rsync can be run.
 	randomPort, err := getFreePort()
 	if err != nil {
-		return errors.Wrap(err, "failed to find a free port")
+		return fmt.Errorf("failed to find a free port: %w", err)
 	}
 	stopChannel := make(chan struct{}, 1)
 	readyChannel := make(chan struct{}, 1)
@@ -140,19 +139,19 @@
 	// wait for exec container to be running
 	_, err = r.waitForPod(5*time.Minute, containerRunning("exec"))
 	if err != nil {
-		return errors.Wrap(err, "execute pod container never started")
+		return fmt.Errorf("execute pod container never started: %w", err)
 	}

 	// stream the logs of the container
 	err = r.streamLogs("exec", stdout)
 	if err != nil {
-		return errors.Wrap(err, "failed to stream the logs")
+		return fmt.Errorf("failed to stream the logs: %w", err)
 	}

 	// wait for exec container to be completely done
 	pod, err := r.waitForPod(30*time.Second, podDone)
 	if err != nil {
-		return errors.Wrap(err, "execute pod didn't terminate after 30 seconds of log stream")
+		return fmt.Errorf("execute pod didn't terminate after 30 seconds of log stream: %w", err)
 	}

 	// return error on failure
@@ -199,18 +198,18 @@ func (r *KubeRemote) syncServiceAccount() error {
 		createServiceAccountManifest(r.svcAccName),
 		metav1.CreateOptions{})
 	if err != nil {
-		return errors.Wrap(err, "failed to create service account")
+		return fmt.Errorf("failed to create service account: %w", err)
 	}

 	_, err = r.cs.RbacV1().ClusterRoles().Create(ctx, createClusterRoleManifest(r.name), metav1.CreateOptions{})
 	if err != nil {
-		return errors.Wrap(err, "failed to create cluster role")
+		return fmt.Errorf("failed to create cluster role: %w", err)
 	}

 	_, err = r.cs.RbacV1().ClusterRoleBindings().Create(
 		ctx,
 		createClusterRoleBindingManifest(r.name, r.namespace, r.svcAccName),
 		metav1.CreateOptions{})
 	if err != nil {
-		return errors.Wrap(err, "failed to create cluster role binding")
+		return fmt.Errorf("failed to create cluster role binding: %w", err)
 	}
 	return nil
 }
diff --git a/dev-tools/mage/kubernetes/kubernetes.go b/dev-tools/mage/kubernetes/kubernetes.go
index 9b25f316b2c..e93632d7b65 100644
--- a/dev-tools/mage/kubernetes/kubernetes.go
+++ b/dev-tools/mage/kubernetes/kubernetes.go
@@ -29,7 +29,6 @@ import (
 	"time"

 	"github.com/magefile/mage/mg"
-	"github.com/pkg/errors"

 	"github.com/elastic/beats/v7/dev-tools/mage"
 )
@@ -106,14 +105,14 @@ func (d *KubernetesIntegrationTester) Test(dir string, mageTarget string, env ma

-	// Apply the manifest from the dir. This is the requirements for the tests that will
-	// run inside the cluster.
+	// Apply the manifest from the dir. These are the requirements for the tests that will
+	// run inside the cluster.
 	if err := KubectlApply(env, stdOut, stdErr, manifestPath); err != nil {
-		return errors.Wrapf(err, "failed to apply manifest %s", manifestPath)
+		return fmt.Errorf("failed to apply manifest %s: %w", manifestPath, err)
 	}
 	defer func() {
 		if mg.Verbose() {
 			fmt.Println(">> Deleting module manifest from cluster...")
 		}
 		if err := KubectlDelete(env, stdOut, stdErr, manifestPath); err != nil {
-			log.Printf("%s", errors.Wrapf(err, "failed to apply manifest %s", manifestPath))
+			log.Printf("%s", fmt.Errorf("failed to delete manifest %s: %w", manifestPath, err))
 		}
 	}()
@@ -164,7 +163,7 @@ func waitKubeStateMetricsReadiness(env map[string]string, stdOut, stdErr io.Writ
 			break
 		}
 		if readyAttempts > checkKubeStateMetricsReadyAttempts {
-			return errors.Wrapf(err, "Timeout waiting for kube-state-metrics")
+			return fmt.Errorf("timeout waiting for kube-state-metrics: %w", err)
 		}
 		time.Sleep(6 * time.Second)
 		readyAttempts += 1
@@ -177,12 +176,12 @@ func waitKubeStateMetricsReadiness(env map[string]string, stdOut, stdErr io.Writ
 func kubernetesClusterName() string {
 	commit, err := mage.CommitHash()
 	if err != nil {
-		panic(errors.Wrap(err, "failed to construct kind cluster name"))
+		panic(fmt.Errorf("failed to construct kind cluster name: %w", err))
 	}

 	version, err := mage.BeatQualifiedVersion()
 	if err != nil {
-		panic(errors.Wrap(err, "failed to construct kind cluster name"))
+		panic(fmt.Errorf("failed to construct kind cluster name: %w", err))
 	}
 	version = strings.NewReplacer(".", "-").Replace(version)
@@ -202,7 +201,7 @@
 	// Note that underscores, in particular, are not permitted.
 	matched, err := regexp.MatchString(subDomainPattern, clusterName)
 	if err != nil {
-		panic(errors.Wrap(err, "error while validating kind cluster name"))
+		panic(fmt.Errorf("error while validating kind cluster name: %w", err))
 	}
 	if !matched {
 		panic("constructed invalid kind cluster name")
diff --git a/dev-tools/mage/modules.go b/dev-tools/mage/modules.go
index a65c2c2a121..14b7f7ab118 100644
--- a/dev-tools/mage/modules.go
+++ b/dev-tools/mage/modules.go
@@ -25,7 +25,6 @@ import (
 	"strings"

 	"github.com/joeshaw/multierror"
-	"github.com/pkg/errors"
 	"gopkg.in/yaml.v2"
 )
@@ -131,11 +130,11 @@ func loadModulesD() (modules map[string][]moduleDefinition, err error) {
 	for _, file := range files {
 		contents, err := ioutil.ReadFile(file)
 		if err != nil {
-			return nil, errors.Wrapf(err, "reading %s", file)
+			return nil, fmt.Errorf("reading %s: %w", file, err)
 		}
 		var cfg []moduleDefinition
 		if err = yaml.Unmarshal(contents, &cfg); err != nil {
-			return nil, errors.Wrapf(err, "parsing %s as YAML", file)
+			return nil, fmt.Errorf("parsing %s as YAML: %w", file, err)
 		}
 		modules[file] = cfg
 	}
diff --git a/dev-tools/mage/pkgspecs.go b/dev-tools/mage/pkgspecs.go
index 7a029238b61..9d0a6b339b4 100644
--- a/dev-tools/mage/pkgspecs.go
+++ b/dev-tools/mage/pkgspecs.go
@@ -19,11 +19,11 @@ package mage

 import (
 	"bytes"
+	"fmt"
 	"io/ioutil"
 	"log"
 	"path/filepath"

-	"github.com/pkg/errors"
 	"gopkg.in/yaml.v2"
 )
@@ -72,12 +72,12 @@ func MustUsePackaging(specName, specFile string) {
 func LoadNamedSpec(name string, files ...string) error {
 	specs, err := LoadSpecs(files...)
if err != nil { - return errors.Wrap(err, "failed to load spec file") + return fmt.Errorf("failed to load spec file: %w", err) } packages, found := specs[name] if !found { - return errors.Errorf("%v not found in package specs", name) + return fmt.Errorf("%v not found in package specs", name) } log.Printf("%v package spec loaded from %v", name, files) @@ -91,7 +91,7 @@ func LoadSpecs(files ...string) (map[string][]OSPackageArgs, error) { for _, file := range files { d, err := ioutil.ReadFile(file) if err != nil { - return nil, errors.Wrap(err, "failed to read from spec file") + return nil, fmt.Errorf("failed to read from spec file: %w", err) } data = append(data, d) } @@ -102,7 +102,7 @@ func LoadSpecs(files ...string) (map[string][]OSPackageArgs, error) { var packages PackageYAML if err := yaml.Unmarshal(bytes.Join(data, []byte{'\n'}), &packages); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal spec data") + return nil, fmt.Errorf("failed to unmarshal spec data: %w", err) } return packages.Specs, nil diff --git a/dev-tools/mage/pkgtypes.go b/dev-tools/mage/pkgtypes.go index 7a775850cf3..249612f8feb 100644 --- a/dev-tools/mage/pkgtypes.go +++ b/dev-tools/mage/pkgtypes.go @@ -36,7 +36,6 @@ import ( "github.com/magefile/mage/mg" "github.com/magefile/mage/sh" "github.com/mitchellh/hashstructure" - "github.com/pkg/errors" ) const ( @@ -185,19 +184,19 @@ var OSArchNames = map[string]map[PackageType]map[string]string{ func getOSArchName(platform BuildPlatform, t PackageType) (string, error) { names, found := OSArchNames[platform.GOOS()] if !found { - return "", errors.Errorf("arch names for os=%v are not defined", + return "", fmt.Errorf("arch names for os=%v are not defined", platform.GOOS()) } archMap, found := names[t] if !found { - return "", errors.Errorf("arch names for %v on os=%v are not defined", + return "", fmt.Errorf("arch names for %v on os=%v are not defined", t, platform.GOOS()) } arch, found := archMap[platform.Arch()] if !found { - return "", errors.Errorf("arch name associated with %v for %v on "+ + return "", fmt.Errorf("arch name associated with %v for %v on "+ "os=%v is not defined", platform.Arch(), t, platform.GOOS()) } @@ -241,7 +240,7 @@ func (typ *PackageType) UnmarshalText(text []byte) error { case "docker": *typ = Docker default: - return errors.Errorf("unknown package type: %v", string(text)) + return fmt.Errorf("unknown package type: %v", string(text)) } return nil } @@ -286,7 +285,7 @@ func (typ PackageType) Build(spec PackageSpec) error { case Docker: return PackageDocker(spec) default: - return errors.Errorf("unknown package type: %v", typ) + return fmt.Errorf("unknown package type: %v", typ) } } @@ -309,7 +308,7 @@ func (s PackageSpec) Clone() PackageSpec { func (s PackageSpec) ReplaceFile(target string, file PackageFile) { _, found := s.Files[target] if !found { - panic(errors.Errorf("failed to ReplaceFile because target=%v does not exist", target)) + panic(fmt.Errorf("failed to ReplaceFile because target=%v does not exist", target)) } s.Files[target] = file @@ -407,7 +406,7 @@ func (s PackageSpec) Evaluate(args ...map[string]interface{}) PackageSpec { // Execute the dependency if it exists. 
if f.Dep != nil { if err := f.Dep(s); err != nil { - panic(errors.Wrapf(err, "failed executing package file dependency for target=%v", target)) + panic(fmt.Errorf("failed executing package file dependency for target=%v: %w", target, err)) } } @@ -422,20 +421,20 @@ func (s PackageSpec) Evaluate(args ...map[string]interface{}) PackageSpec { case f.Content != "": content, err := s.Expand(f.Content) if err != nil { - panic(errors.Wrapf(err, "failed to expand content template for target=%v", target)) + panic(fmt.Errorf("failed to expand content template for target=%v: %w", target, err)) } f.Source = filepath.Join(s.packageDir, filepath.Base(f.Target)) if err = ioutil.WriteFile(CreateDir(f.Source), []byte(content), 0644); err != nil { - panic(errors.Wrapf(err, "failed to write file containing content for target=%v", target)) + panic(fmt.Errorf("failed to write file containing content for target=%v: %w", target, err)) } case f.Template != "": f.Source = filepath.Join(s.packageDir, filepath.Base(f.Template)) if err := s.ExpandFile(f.Template, CreateDir(f.Source)); err != nil { - panic(errors.Wrapf(err, "failed to expand template file for target=%v", target)) + panic(fmt.Errorf("failed to expand template file for target=%v: %w", target, err)) } default: - panic(errors.Errorf("package file with target=%v must have either source, content, or template", target)) + panic(fmt.Errorf("package file with target=%v must have either source, content, or template", target)) } evaluatedFiles[f.Target] = f @@ -459,7 +458,7 @@ func (s PackageSpec) ImageName() (string, error) { if name, _ := s.ExtraVars["image_name"]; name != "" { imageName, err := s.Expand(name) if err != nil { - return "", errors.Wrapf(err, "failed to expand image_name") + return "", fmt.Errorf("failed to expand image_name: %w", err) } return imageName, nil } @@ -481,11 +480,11 @@ func copyInstallScript(spec PackageSpec, script string, local *string) error { } if err := spec.ExpandFile(script, createDir(*local)); err != nil { - return errors.Wrap(err, "failed to copy install script to package dir") + return fmt.Errorf("failed to copy install script to package dir: %w", err) } if err := os.Chmod(*local, 0755); err != nil { - return errors.Wrap(err, "failed to chmod install script") + return fmt.Errorf("failed to chmod install script: %w", err) } return nil @@ -494,7 +493,7 @@ func copyInstallScript(spec PackageSpec, script string, local *string) error { func (s PackageSpec) hash() string { h, err := hashstructure.Hash(s, nil) if err != nil { - panic(errors.Wrap(err, "failed to compute hash of spec")) + panic(fmt.Errorf("failed to compute hash of spec: %w", err)) } hash := strconv.FormatUint(h, 10) @@ -550,7 +549,7 @@ func PackageZip(spec PackageSpec) error { if err := addFileToZip(w, baseDir, pkgFile); err != nil { p, _ := filepath.Abs(pkgFile.Source) - return errors.Wrapf(err, "failed adding file=%+v to zip", p) + return fmt.Errorf("failed adding file=%+v to zip: %w", p, err) } } @@ -570,7 +569,7 @@ func PackageZip(spec PackageSpec) error { // Write the zip file. 
if err := ioutil.WriteFile(CreateDir(spec.OutputFile), buf.Bytes(), 0644); err != nil { - return errors.Wrap(err, "failed to write zip file") + return fmt.Errorf("failed to write zip file: %w", err) } // Any packages beginning with "tmp-" are temporary by nature so don't have @@ -579,7 +578,10 @@ func PackageZip(spec PackageSpec) error { return nil } - return errors.Wrap(CreateSHA512File(spec.OutputFile), "failed to create .sha512 file") + if err := CreateSHA512File(spec.OutputFile); err != nil { + return fmt.Errorf("failed to create .sha512 file: %w", err) + } + return nil } // PackageTarGz packages a gzipped tar file. @@ -619,7 +621,7 @@ func PackageTarGz(spec PackageSpec) error { } if err := addFileToTar(w, baseDir, pkgFile); err != nil { - return errors.Wrapf(err, "failed adding file=%+v to tar", pkgFile) + return fmt.Errorf("failed adding file=%+v to tar: %w", pkgFile, err) } } @@ -636,7 +638,7 @@ func PackageTarGz(spec PackageSpec) error { defer os.RemoveAll(tmpdir) if err := addSymlinkToTar(tmpdir, w, baseDir, pkgFile); err != nil { - return errors.Wrapf(err, "failed adding file=%+v to tar", pkgFile) + return fmt.Errorf("failed adding file=%+v to tar: %w", pkgFile, err) } } @@ -679,7 +681,11 @@ func PackageTarGz(spec PackageSpec) error { return nil } - return errors.Wrap(CreateSHA512File(spec.OutputFile), "failed to create .sha512 file") + if err := CreateSHA512File(spec.OutputFile); err != nil { + return fmt.Errorf("failed to create .sha512 file: %w", err) + } + + return nil } func replaceFileArch(filename string, pkgFile PackageFile, arch string) (string, PackageFile) { @@ -706,7 +712,7 @@ func runFPM(spec PackageSpec, packageType PackageType) error { case RPM, Deb: fpmPackageType = packageType.String() default: - return errors.Errorf("unsupported package type=%v for runFPM", fpmPackageType) + return fmt.Errorf("unsupported package type=%v for runFPM", fpmPackageType) } if err := HaveDocker(); err != nil { @@ -787,10 +793,13 @@ func runFPM(spec PackageSpec, packageType PackageType) error { ) if err = dockerRun(args...); err != nil { - return errors.Wrap(err, "failed while running FPM in docker") + return fmt.Errorf("failed while running FPM in docker: %w", err) } - return errors.Wrap(CreateSHA512File(spec.OutputFile), "failed to create .sha512 file") + if err = CreateSHA512File(spec.OutputFile); err != nil { + return fmt.Errorf("failed to create .sha512 file: %w", err) + } + return nil } func addUidGidEnvArgs(args []string) ([]string, error) { @@ -800,7 +809,7 @@ func addUidGidEnvArgs(args []string) ([]string, error) { info, err := GetDockerInfo() if err != nil { - return args, errors.Wrap(err, "failed to get docker info") + return args, fmt.Errorf("failed to get docker info: %w", err) } uid, gid := os.Getuid(), os.Getgid() @@ -1006,7 +1015,7 @@ func addSymlinkToTar(tmpdir string, ar *tar.Writer, baseDir string, pkgFile Pack // PackageDocker packages the Beat into a docker image. 
func PackageDocker(spec PackageSpec) error { if err := HaveDocker(); err != nil { - return errors.Errorf("docker daemon required to build images: %s", err) + return fmt.Errorf("docker daemon required to build images: %w", err) } b, err := newDockerBuilder(spec) diff --git a/dev-tools/mage/settings.go b/dev-tools/mage/settings.go index 0fe010877f0..46ccf691502 100644 --- a/dev-tools/mage/settings.go +++ b/dev-tools/mage/settings.go @@ -18,6 +18,7 @@ package mage import ( + "errors" "fmt" "go/build" "io/ioutil" @@ -31,7 +32,6 @@ import ( "time" "github.com/magefile/mage/sh" - "github.com/pkg/errors" "golang.org/x/tools/go/vcs" "github.com/elastic/beats/v7/dev-tools/mage/gotool" ) @@ -108,22 +108,22 @@ func init() { var err error RaceDetector, err = strconv.ParseBool(EnvOr("RACE_DETECTOR", "false")) if err != nil { - panic(errors.Wrap(err, "failed to parse RACE_DETECTOR env value")) + panic(fmt.Errorf("failed to parse RACE_DETECTOR env value: %w", err)) } TestCoverage, err = strconv.ParseBool(EnvOr("TEST_COVERAGE", "false")) if err != nil { - panic(errors.Wrap(err, "failed to parse TEST_COVERAGE env value")) + panic(fmt.Errorf("failed to parse TEST_COVERAGE env value: %w", err)) } Snapshot, err = strconv.ParseBool(EnvOr("SNAPSHOT", "false")) if err != nil { - panic(errors.Wrap(err, "failed to parse SNAPSHOT env value")) + panic(fmt.Errorf("failed to parse SNAPSHOT env value: %w", err)) } DevBuild, err = strconv.ParseBool(EnvOr("DEV", "false")) if err != nil { - panic(errors.Wrap(err, "failed to parse DEV env value")) + panic(fmt.Errorf("failed to parse DEV env value: %w", err)) } versionQualifier, versionQualified = os.LookupEnv("VERSION_QUALIFIER") @@ -448,7 +448,7 @@ func getBuildVariableSources() *BuildVariableSources { return buildVariableSources } - panic(errors.Errorf("magefile must call devtools.SetBuildVariableSources() "+ + panic(fmt.Errorf("magefile must call devtools.SetBuildVariableSources() "+ "because it is not an elastic beat (repo=%+v)", repo.RootImportPath)) } @@ -493,7 +493,7 @@ func (s *BuildVariableSources) GetBeatVersion() (string, error) { data, err := ioutil.ReadFile(file) if err != nil { - return "", errors.Wrapf(err, "failed to read beat version file=%v", file) + return "", fmt.Errorf("failed to read beat version file=%v: %w", file, err) } if s.BeatVersionParser == nil { @@ -511,7 +511,7 @@ func (s *BuildVariableSources) GetGoVersion() (string, error) { data, err := ioutil.ReadFile(file) if err != nil { - return "", errors.Wrapf(err, "failed to read go version file=%v", file) + return "", fmt.Errorf("failed to read go version file=%v: %w", file, err) } if s.GoVersionParser == nil { @@ -529,7 +529,7 @@ func (s *BuildVariableSources) GetDocBranch() (string, error) { data, err := ioutil.ReadFile(file) if err != nil { - return "", errors.Wrapf(err, "failed to read doc branch file=%v", file) + return "", fmt.Errorf("failed to read doc branch file=%v: %w", file, err) } if s.DocBranchParser == nil { @@ -645,7 +645,7 @@ func getProjectRepoInfoWithModules() (*ProjectRepoInfo, error) { } if rootDir == "" { - return nil, errors.Errorf("failed to find root dir of module file: %v", errs) + return nil, fmt.Errorf("failed to find root dir of module file: %v", errs) } rootImportPath, err := gotool.GetModuleName() @@ -699,12 +699,12 @@ func getProjectRepoInfoUnderGopath() (*ProjectRepoInfo, error) { } if rootDir == "" { - return nil, errors.Errorf("error while determining root directory: %v", errs) + return nil, fmt.Errorf("error while determining root directory: %v", errs) } subDir,
err := filepath.Rel(rootDir, cwd) if err != nil { - return nil, errors.Wrap(err, "failed to get relative path to repo root") + return nil, fmt.Errorf("failed to get relative path to repo root: %w", err) } rootImportPath, err := gotool.GetModuleName() @@ -756,7 +756,7 @@ func listSrcGOPATHs() ([]string, error) { } if len(srcDirs) == 0 { - return srcDirs, errors.Errorf("failed to find any GOPATH %v", errs) + return srcDirs, fmt.Errorf("failed to find any GOPATH %v", errs) } return srcDirs, nil diff --git a/dev-tools/mage/target/dashboards/dashboards.go b/dev-tools/mage/target/dashboards/dashboards.go index c993b8bacc0..b7ac6685c31 100644 --- a/dev-tools/mage/target/dashboards/dashboards.go +++ b/dev-tools/mage/target/dashboards/dashboards.go @@ -18,8 +18,9 @@ package dashboards import ( + "errors" + "github.com/magefile/mage/mg" - "github.com/pkg/errors" devtools "github.com/elastic/beats/v7/dev-tools/mage" ) diff --git a/dev-tools/packaging/templates/docker/Dockerfile.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.tmpl index 29d086463e8..3ba9207f077 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.tmpl @@ -64,7 +64,7 @@ ENV ELASTIC_CONTAINER "true" ENV PATH={{ $beatHome }}:$PATH ENV GODEBUG="madvdontneed=1" -# Add an init process, check the checksum to make sure it's a match +# Add an init process, and check the checksum to make sure it's a match RUN set -e ; \ TINI_BIN=""; \ TINI_SHA256=""; \ diff --git a/dev-tools/packaging/templates/windows/install-service.ps1.tmpl b/dev-tools/packaging/templates/windows/install-service.ps1.tmpl index 2ad44b04e19..840f8dd97d3 100644 --- a/dev-tools/packaging/templates/windows/install-service.ps1.tmpl +++ b/dev-tools/packaging/templates/windows/install-service.ps1.tmpl @@ -17,4 +17,4 @@ New-Service -name {{.BeatName}} ` Try { Start-Process -FilePath sc.exe -ArgumentList 'config {{.BeatName}} start= delayed-auto' } -Catch { Write-Host -f red "An error occured setting the service to delayed start." } +Catch { Write-Host -f red "An error occurred setting the service to delayed start." } diff --git a/filebeat/_meta/config/filebeat.global.reference.yml.tmpl b/filebeat/_meta/config/filebeat.global.reference.yml.tmpl index 5ff8e745de8..0287fb3f9f5 100644 --- a/filebeat/_meta/config/filebeat.global.reference.yml.tmpl +++ b/filebeat/_meta/config/filebeat.global.reference.yml.tmpl @@ -4,12 +4,12 @@ # data path. #filebeat.registry.path: ${path.data}/registry -# The permissions mask to apply on registry data, and meta files. The default +# The permissions mask to apply on registry data and meta files. The default # value is 0600. Must be a valid Unix-style file permissions mask expressed in # octal notation. This option is not supported on Windows. #filebeat.registry.file_permissions: 0600 -# The timeout value that controls when registry entries are written to disk +# The timeout value that controls when registry entries are written to the disk # (flushed). When an unwritten update exceeds this value, it triggers a write # to disk. When flush is set to 0s, the registry is written to disk after each # batch of events has been published successfully. The default value is 1s. @@ -25,7 +25,7 @@ # By default Ingest pipelines are not updated if a pipeline with the same ID # already exists. If this option is enabled Filebeat overwrites pipelines -# everytime a new Elasticsearch connection is established. +# every time a new Elasticsearch connection is established. 
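The hunks above replace `github.com/pkg/errors` with standard-library error wrapping. The practical difference is the `%w` verb: like `errors.Wrap`, it keeps the cause inspectable through `errors.Is` and `errors.As`, whereas `%s`/`%v` flatten the chain to plain text. A minimal, self-contained sketch (not code from this repository):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// readSpec mirrors the post-migration style: wrap with %w so callers can
// still match the underlying error.
func readSpec(path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read from spec file: %w", err)
	}
	return data, nil
}

func main() {
	_, err := readSpec("does-not-exist.yml")
	// %w keeps the chain intact, so the original error is still matchable.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true

	// %s would have flattened the cause into a string:
	flat := fmt.Errorf("failed to read from spec file: %s", err)
	fmt.Println(errors.Is(flat, fs.ErrNotExist)) // false
}
```

This is also why `%w` is preferred over `%s` even for errors that are only logged or paniced on: callers and tests can keep matching the cause.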
#filebeat.overwrite_pipelines: false # How long filebeat waits on shutdown for the publisher to finish. diff --git a/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl b/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl index 59ed0e74ce1..68ff3d22e07 100644 --- a/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl +++ b/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl @@ -24,7 +24,7 @@ filebeat.inputs: # To fetch all ".log" files from a specific level of subdirectories # /var/log/*/*.log can be used. # For each file found under this path, a harvester is started. - # Make sure not file is defined twice as this can lead to unexpected behaviour. + # Make sure no file is defined twice as this can lead to unexpected behaviour. paths: - /var/log/*.log #- c:\programdata\elasticsearch\logs\* @@ -61,7 +61,7 @@ filebeat.inputs: # level: debug # review: 1 - # Set to true to store the additional fields as top level fields instead + # Set to true to store the additional fields as top-level fields instead # of under the "fields" sub-dictionary. In case of name conflicts with the # fields added by Filebeat itself, the custom fields overwrite the default # fields. @@ -75,7 +75,7 @@ filebeat.inputs: # false. #publisher_pipeline.disable_host: false - # Ignore files which were modified more then the defined timespan in the past. + # Ignore files that were modified more than the defined timespan in the past. # ignore_older is disabled by default, so no files are ignored by setting it to 0. # Time strings like 2h (2 hours), 5m (5 minutes) can be used. #ignore_older: 0 @@ -93,7 +93,7 @@ filebeat.inputs: # This is especially useful for multiline log messages which can get large. #max_bytes: 10485760 - # Characters which separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed, + # Characters that separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed, # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator, # null_terminator #line_terminator: auto @@ -116,7 +116,7 @@ filebeat.inputs: #json.keys_under_root: false # If keys_under_root and this setting are enabled, then the values from the decoded # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) # in case of conflicts. #json.overwrite_keys: false @@ -125,7 +125,7 @@ filebeat.inputs: # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`. #json.expand_keys: false - # If this setting is enabled, Filebeat adds a "error.message" and "error.key: json" key in case of JSON + # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON # unmarshaling errors or when a text key is defined in the configuration but cannot # be used. #json.add_error_key: false @@ -138,20 +138,20 @@ filebeat.inputs: # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ #multiline.pattern: ^\[ - # Defines if the pattern set under pattern should be negated or not. Default is false. + # Defines if the pattern set under the pattern setting should be negated or not. Default is false. #multiline.negate: false - # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern + # Match can be set to "after" or "before".
It is used to define if lines should be appended to a pattern # that was (not) matched before or after or as long as a pattern is not matched based on negate. # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash #multiline.match: after - # The maximum number of lines that are combined to one event. + # The maximum number of lines that are combined into one event. # In case there are more the max_lines the additional lines are discarded. # Default is 500 #multiline.max_lines: 500 - # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event + # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event # Default is 5s. #multiline.timeout: 5s @@ -161,7 +161,7 @@ filebeat.inputs: # The number of lines to aggregate into a single event. #multiline.count_lines: 3 - # Do not add new line character when concatenating lines. + # Do not add new line characters when concatenating lines. #multiline.skip_newline: false # Setting tail_files to true means filebeat starts reading new files at the end @@ -174,13 +174,13 @@ filebeat.inputs: #pipeline: # If symlinks is enabled, symlinks are opened and harvested. The harvester is opening the - # original for harvesting but will report the symlink name as source. + # original for harvesting but will report the symlink name as the source. #symlinks: false # Backoff values define how aggressively filebeat crawls new files for updates - # The default values can be used in most cases. Backoff defines how long it is waited + # The default values can be used in most cases. Backoff defines how long it has to wait # to check a file again after EOF is reached. Default is 1s which means the file - # is checked every second if new lines were added. This leads to a near real time crawling. + # is checked every second if new lines were added. This leads to a near real-time crawling. # Every time a new line appears, backoff is reset to the initial value. #backoff: 1s @@ -203,7 +203,7 @@ filebeat.inputs: # Close inactive closes the file handler after the predefined period. # The period starts when the last line of the file was, not the file ModTime. - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + # Time strings like 2h (2 hours) and 5m (5 minutes) can be used. #close_inactive: 5m # Close renamed closes a file handler when the file is renamed or rotated. @@ -215,18 +215,18 @@ filebeat.inputs: # after scan_frequency. #close_removed: true - # Closes the file handler as soon as the harvesters reaches the end of the file. + # Closes the file handler as soon as the harvester reaches the end of the file. # By default this option is disabled. # Note: Potential data loss. Make sure to read and understand the docs for this option. #close_eof: false ### State options - # Files for the modification data is older then clean_inactive the state from the registry is removed + # If the modification time of a file is older than clean_inactive, the file's state is removed from the registry # By default this is disabled. #clean_inactive: 0 - # Removes the state for file which cannot be found on disk anymore immediately + # Immediately removes the state for files which can no longer be found on disk #clean_removed: true # Close timeout closes the harvester after the predefined time. @@ -235,7 +235,7 @@ filebeat.inputs: # Note: Potential data loss. Make sure to read and understand the docs for this option.
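The multiline settings touched above (`pattern`, `negate`, `match`) amount to a small decision rule: apply the pattern, flip the result when `negate` is true, and, with `match: after`, append matching lines to the previous event. A minimal sketch of that rule (illustrative only; Filebeat's reader also handles `max_lines`, timeouts, and flush patterns):

```go
package main

import (
	"fmt"
	"regexp"
)

// startsNewEvent implements the documented decision for match: after --
// a line that matches the pattern (XOR negate) is a continuation of the
// previous event; anything else starts a new one.
func startsNewEvent(line string, pattern *regexp.Regexp, negate bool) bool {
	matched := pattern.MatchString(line)
	if negate {
		matched = !matched
	}
	return !matched
}

func main() {
	// The reference config's example pattern matches lines starting with '['.
	// With negate=false and match=after, those lines are appended to the
	// preceding event.
	pattern := regexp.MustCompile(`^\[`)
	lines := []string{"Exception in thread", "[stack frame 1]", "[stack frame 2]", "Next log line"}

	var events [][]string
	for _, l := range lines {
		if startsNewEvent(l, pattern, false) || len(events) == 0 {
			events = append(events, []string{l})
			continue
		}
		last := len(events) - 1
		events[last] = append(events[last], l)
	}
	fmt.Println(len(events), "events:", events) // 2 events
}
```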
#close_timeout: 0 - # Defines if inputs is enabled + # Defines if the input is enabled #enabled: true #--------------------------- Filestream input ---------------------------- @@ -297,9 +297,22 @@ filebeat.inputs: #prospector.scanner.recursive_glob: true # If symlinks is enabled, symlinks are opened and harvested. The harvester is opening the - # original for harvesting but will report the symlink name as source. + # original for harvesting but will report the symlink name as the source. #prospector.scanner.symlinks: false + # If enabled, instead of relying on the device ID and inode values when comparing files, + # compare hashes of the given byte ranges in files. A file becomes an ingest target + # when its size grows larger than offset+length (see below). Until then it's ignored. + #prospector.scanner.fingerprint.enabled: false + + # If fingerprint mode is enabled, sets the offset from the beginning of the file + # for the byte range used for computing the fingerprint value. + #prospector.scanner.fingerprint.offset: 0 + + # If fingerprint mode is enabled, sets the length of the byte range used for + # computing the fingerprint value. Cannot be less than 64 bytes. + #prospector.scanner.fingerprint.length: 1024 + ### Parsers configuration #### JSON configuration @@ -355,12 +368,12 @@ filebeat.inputs: # Defines if the pattern set under the pattern setting should be negated or not. Default is false. #negate: false - # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern + # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern # that was (not) matched before or after or as long as a pattern is not matched based on negate. # Note: After is the equivalent to previous and before is the equivalent to next in Logstash #match: after - # The maximum number of lines that are combined to one event. + # The maximum number of lines that are combined into one event. # In case there are more than max_lines the additional lines are discarded. # Default is 500 #max_lines: 500 @@ -381,16 +394,16 @@ filebeat.inputs: # The number of lines to aggregate into a single event. #count_lines: 3 - # The maximum number of lines that are combined to one event. + # The maximum number of lines that are combined into one event. # In case there are more than max_lines the additional lines are discarded. # Default is 500 #max_lines: 500 - # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event + # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event # Default is 5s. #timeout: 5s - # Do not add new line character when concatenating lines. + # Do not add new line characters when concatenating lines. #skip_newline: false #### Parsing container events @@ -417,11 +430,11 @@ filebeat.inputs: ### State options - # Files for the modification data is older then clean_inactive the state from the registry is removed + # If the modification time of a file is older than clean_inactive, the file's state is removed from the registry # By default this is disabled. #clean_inactive: 0 - # Removes the state for file which cannot be found on disk anymore immediately + # Immediately removes the state for files which can no longer be found on disk #clean_removed: true # Method to determine if two files are the same or not. By default @@ -442,9 +455,9 @@ filebeat.inputs: # false.
#publisher_pipeline.disable_host: false - # Ignore files which were modified more then the defined timespan in the past. + # Ignore files that were modified more than the defined timespan in the past. # ignore_older is disabled by default, so no files are ignored by setting it to 0. - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + # Time strings like 2h (2 hours) and 5m (5 minutes) can be used. #ignore_older: 0 # Ignore files that have not been updated since the selected event. @@ -465,7 +478,7 @@ filebeat.inputs: # This is especially useful for multiline log messages which can get large. #message_max_bytes: 10485760 - # Characters which separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed, + # Characters that separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed, # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator, # null_terminator #line_terminator: auto @@ -475,9 +488,9 @@ filebeat.inputs: #pipeline: # Backoff values define how aggressively filebeat crawls new files for updates - # The default values can be used in most cases. Backoff defines how long it is waited + # The default values can be used in most cases. Backoff defines how long it has to wait # to check a file again after EOF is reached. Default is 1s which means the file - # is checked every second if new lines were added. This leads to a near real time crawling. + # is checked every second if new lines were added. This leads to a near real-time crawling. # Every time a new line appears, backoff is reset to the initial value. #backoff.init: 1s @@ -491,7 +504,7 @@ filebeat.inputs: # Close inactive closes the file handler after the predefined period. # The period starts when the last line of the file was, not the file ModTime. - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + # Time strings like 2h (2 hours) and 5m (5 minutes) can be used. #close.on_state_change.inactive: 5m # Close renamed closes a file handler when the file is renamed or rotated. diff --git a/filebeat/_meta/config/filebeat.inputs.yml.tmpl b/filebeat/_meta/config/filebeat.inputs.yml.tmpl index a0633b76f06..552988ee448 100644 --- a/filebeat/_meta/config/filebeat.inputs.yml.tmpl +++ b/filebeat/_meta/config/filebeat.inputs.yml.tmpl @@ -4,7 +4,7 @@ filebeat.inputs: # Each - is an input. Most options can be set at the input level, so # you can use different inputs for various configurations. -# Below are the input specific configurations. +# Below are the input-specific configurations. # filestream is an input for collecting log messages from files. - type: filestream diff --git a/filebeat/beater/filebeat.go b/filebeat/beater/filebeat.go index 737c2aa9344..532d8132110 100644 --- a/filebeat/beater/filebeat.go +++ b/filebeat/beater/filebeat.go @@ -128,6 +128,18 @@ func newBeater(b *beat.Beat, plugins PluginFactory, rawConfig *conf.C) (beat.Bea } } + if b.Manager != nil { + b.Manager.RegisterDiagnosticHook("input_metrics", "Metrics from active inputs.", + "input_metrics.json", "application/json", func() []byte { + data, err := inputmon.MetricSnapshotJSON() + if err != nil { + logp.L().Warnw("Failed to collect input metric snapshot for Agent diagnostics.", "error", err) + return []byte(err.Error()) + } + return data + }) + } + // Add inputs created by the modules config.Inputs = append(config.Inputs, moduleInputs...) 
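The filebeat.go hunk above registers an `input_metrics` diagnostics hook: a named callback that returns the bytes for one file in the Agent diagnostics bundle, falling back to the error text rather than failing. A hypothetical sketch of that callback contract; `diagRegistry` and its methods are illustrative stand-ins, not the real Manager API:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// hook mirrors the shape of a registered diagnostics callback: a filename,
// a content type, and a function producing the payload bytes.
type hook struct {
	filename    string
	contentType string
	fn          func() []byte
}

type diagRegistry map[string]hook

func (r diagRegistry) register(name, filename, contentType string, fn func() []byte) {
	r[name] = hook{filename: filename, contentType: contentType, fn: fn}
}

// collect runs every hook and gathers filename -> payload, roughly what a
// diagnostics bundle would contain.
func (r diagRegistry) collect() map[string][]byte {
	out := make(map[string][]byte, len(r))
	for _, h := range r {
		out[h.filename] = h.fn()
	}
	return out
}

func main() {
	reg := diagRegistry{}
	reg.register("input_metrics", "input_metrics.json", "application/json", func() []byte {
		// Stand-in for inputmon.MetricSnapshotJSON(): return the snapshot,
		// or the error text so the bundle itself never fails.
		data, err := json.Marshal(map[string]int{"events_published": 42})
		if err != nil {
			return []byte(err.Error())
		}
		return data
	})
	for filename, payload := range reg.collect() {
		fmt.Printf("%s: %s\n", filename, payload)
	}
}
```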
diff --git a/filebeat/docs/inputs/input-common-file-options.asciidoc b/filebeat/docs/inputs/input-common-file-options.asciidoc index c913e4e3380..7d62c5827fe 100644 --- a/filebeat/docs/inputs/input-common-file-options.asciidoc +++ b/filebeat/docs/inputs/input-common-file-options.asciidoc @@ -77,10 +77,10 @@ certain criteria or time. Closing the harvester means closing the file handler. If a file is updated after the harvester is closed, the file will be picked up again after `scan_frequency` has elapsed. However, if the file is moved or deleted while the harvester is closed, {beatname_uc} will not be able to pick up -the file again, and any data that the harvester hasn't read will be lost. -The `close_*` settings are applied synchronously when {beatname_uc} attempts +the file again, and any data that the harvester hasn't read will be lost. +The `close_*` settings are applied synchronously when {beatname_uc} attempts to read from a file, meaning that if {beatname_uc} is in a blocked state -due to blocked output, full queue or other issue, a file that would +due to blocked output, full queue or other issue, a file that would otherwise be closed remains open until {beatname_uc} once again attempts to read from the file. @@ -240,7 +240,7 @@ that should be removed based on the `clean_inactive` setting. This happens because {beatname_uc} doesn't remove the entries until it opens the registry again to read a different file. If you are testing the `clean_inactive` setting, make sure {beatname_uc} is configured to read from more than one file, or the -file state will never be removed from the registry. +file state will never be removed from the registry. [float] [id="{beatname_lc}-input-{type}-clean-removed"] @@ -441,4 +441,3 @@ Set the location of the marker file the following way: ---- file_identity.inode_marker.path: /logs/.filebeat-marker ---- - diff --git a/filebeat/docs/inputs/input-common-udp-options.asciidoc b/filebeat/docs/inputs/input-common-udp-options.asciidoc index 5a9dbd7e324..e4b2cae25e4 100644 --- a/filebeat/docs/inputs/input-common-udp-options.asciidoc +++ b/filebeat/docs/inputs/input-common-udp-options.asciidoc @@ -20,10 +20,11 @@ The host and UDP port to listen on for event streams. [id="{beatname_lc}-input-{type}-udp-read-buffer"] ==== `read_buffer` -The size of the read buffer on the UDP socket. +The size of the read buffer on the UDP socket. If not specified, the default +from the operating system will be used. [float] [id="{beatname_lc}-input-{type}-udp-timeout"] ==== `timeout` -The read and write timeout for socket operations. +The read and write timeout for socket operations. The default is `5m`. diff --git a/filebeat/docs/inputs/input-filestream-file-options.asciidoc b/filebeat/docs/inputs/input-filestream-file-options.asciidoc index 68629d32e8a..8b1bd083668 100644 --- a/filebeat/docs/inputs/input-filestream-file-options.asciidoc +++ b/filebeat/docs/inputs/input-filestream-file-options.asciidoc @@ -146,6 +146,62 @@ stays open and constantly polls your files. The default setting is 10s. +[float] +[id="{beatname_lc}-input-{type}-scan-fingerprint"] +===== `prospector.scanner.fingerprint` + +Instead of relying on the device ID and inode values when comparing files, compare hashes of the given byte ranges of files. + +Enable this option if you're experiencing data loss or data duplication due to unstable file identifiers provided by the file system. + +Following are some scenarios where this can happen: + +. Some file systems (e.g.
in Docker) cache and re-use inodes ++ +for example if you: ++ +.. Create a file (`touch x`) +.. Check the file's inode (`ls -i x`) +.. Delete the file (`rm x`) +.. Create a new file right away (`touch y`) +.. Check the inode of the new file (`ls -i y`) ++ + +For both files you might see the same inode value even though they have different filenames. ++ +. Non-Ext file systems can change inodes: ++ +Ext file systems store the inode number in the `i_ino` field, inside a struct `inode`, which is written to disk. In this case, if the file is the same (not another file with the same name) then the inode number is guaranteed to be the same. ++ +If the file system is not Ext, the inode number is generated by the inode operations defined by the file system driver. Since these drivers have no native concept of an inode, they have to mimic all of the inode's internal fields to comply with VFS, so this number may be different after a reboot, theoretically even after closing and opening the file again. ++ +. Some file processing tools change inode values ++ +Sometimes users unintentionally change inodes by using tools like `rsync` or `sed`. ++ +. Some operating systems change device IDs after reboot ++ +Depending on a mounting approach, the device ID (which is also used for comparing files) might change after a reboot. + +**Configuration** + +Fingerprint mode is disabled by default. + +WARNING: Enabling fingerprint mode delays ingesting new files until they grow to at least `offset`+`length` bytes in size, so they can be fingerprinted. Until then these files are ignored. + +Normally, log lines contain timestamps and other unique fields that make fingerprint mode viable, +but in every use case users should inspect their logs to determine appropriate values for +the `offset` and `length` parameters. Default `offset` is `0` and default `length` is `1024` or 1 KB. `length` cannot be less than `64`. + +[source,yaml] +---- +fingerprint: + enabled: false + offset: 0 + length: 1024 +---- + + [float] [id="{beatname_lc}-input-{type}-ignore-older"] ===== `ignore_older` @@ -502,6 +558,17 @@ Set the location of the marker file the following way: file_identity.inode_marker.path: /logs/.filebeat-marker ---- +*`fingerprint`*:: To identify files based on their content byte range. + +WARNING: In order to use this file identity option, you must enable the <<{beatname_lc}-input-filestream-scan-fingerprint,fingerprint option in the scanner>>. Once this file identity is enabled, changing the fingerprint configuration (offset, length, or other settings) will lead to a global re-ingestion of all files that match the paths configuration of the input. + +Please refer to the <<{beatname_lc}-input-filestream-scan-fingerprint,fingerprint configuration>> for details. + +[source,yaml] +---- +file_identity.fingerprint: ~ +---- + [[filestream-log-rotation-support]] [float] === Log rotation diff --git a/filebeat/docs/inputs/input-filestream.asciidoc b/filebeat/docs/inputs/input-filestream.asciidoc index bb9e32e235c..e55ff611496 100644 --- a/filebeat/docs/inputs/input-filestream.asciidoc +++ b/filebeat/docs/inputs/input-filestream.asciidoc @@ -95,7 +95,7 @@ device IDs. However, on network shares and cloud providers these values might change during the lifetime of the file. If this happens {beatname_uc} thinks that file is new and resends the whole content of the file. To solve this problem you can configure `file_identity` option.
Possible -values besides the default `inode_deviceid` are `path` and `inode_marker`. +values besides the default `inode_deviceid` are `path`, `inode_marker` and `fingerprint`. WARNING: Changing `file_identity` methods between runs may result in duplicated events in the output. @@ -116,6 +116,13 @@ example oneliner generates a hidden marker file for the selected mountpoint `/lo Please note that you should not use this option on Windows as file identifiers might be more volatile. +Selecting `fingerprint` instructs {beatname_uc} to identify files based on their +content byte range. + +WARNING: In order to use this file identity option, one must enable the <<{beatname_lc}-input-filestream-scan-fingerprint,fingerprint option in the scanner>>. Once this file identity is enabled, changing the fingerprint configuration (offset, length, etc.) will lead to a global re-ingestion of all files that match the paths configuration of the input. + +Please refer to the <<{beatname_lc}-input-filestream-scan-fingerprint,fingerprint configuration>> for details. + ["source","sh",subs="attributes"] ---- $ lsblk -o MOUNTPOINT,UUID | grep /logs | awk '{print $2}' >> /logs/.filebeat-marker ---- diff --git a/filebeat/docs/inputs/input-journald.asciidoc b/filebeat/docs/inputs/input-journald.asciidoc index bbc4211b0c5..5b932b4d133 100644 --- a/filebeat/docs/inputs/input-journald.asciidoc +++ b/filebeat/docs/inputs/input-journald.asciidoc @@ -122,17 +122,41 @@ The position to start reading the journal from. Valid settings are: * `head`: Starts reading at the beginning of the journal. After a restart, {beatname_uc} resends all log messages in the journal. -* `tail`: Starts reading at the end of the journal. After a restart, -{beatname_uc} resends the last message, which might result in duplicates. If -multiple log messages are written to a journal while {beatname_uc} is down, -only the last log message is sent on restart. +* `tail`: Starts reading at the end of the journal. This means that no events +will be sent until a new message is written. * `cursor`: On first read, starts reading at the beginning of the journal. After a reload or restart, continues reading at the last known position. +* `since`: Use the `since` option to determine where to start reading from. If you have old log files and want to skip lines, start {beatname_uc} with `seek: tail` specified. Then stop {beatname_uc}, set `seek: cursor`, and restart {beatname_uc}. +[float] +[id="{beatname_lc}-input-{type}-cursor_seek_fallback"] +==== `cursor_seek_fallback` + +The position to start reading the journal from if no cursor information is +available. Valid options are `head`, `tail` and `since`. + +[float] +[id="{beatname_lc}-input-{type}-since"] +==== `since` + +A time offset from the current time to start reading from. To use +`since`, either the `seek` option must be set to `since`, or the `seek` mode +must be set to `cursor` and the `cursor_seek_fallback` set to `since`. + +This example demonstrates how to resume from the persisted cursor when +it exists, or otherwise begin reading logs from the last 24 hours.
+ +["source","yaml",subs="attributes"] +---- +seek: cursor +cursor_seek_fallback: since +since: -24h +---- + [float] [id="{beatname_lc}-input-{type}-units"] ==== `units` diff --git a/filebeat/docs/inputs/input-mqtt.asciidoc b/filebeat/docs/inputs/input-mqtt.asciidoc index 081f9788234..3f8e61cedb9 100644 --- a/filebeat/docs/inputs/input-mqtt.asciidoc +++ b/filebeat/docs/inputs/input-mqtt.asciidoc @@ -68,6 +68,17 @@ A client username used for authentication provided on the application level by t A client password used for authentication provided on the application level by the MQTT protocol. +===== `clean_session` + +The `clean_session` flag indicates whether the client wants to establish a persistent session with the broker. +The default is `true`. + +When `clean_session` is set to false, the session is considered to be persistent. The broker stores all subscriptions for +the client and all missed messages for the client that subscribed with a Quality of Service (QoS) level 1 or 2. + +In contrast, when `clean_session` is set to true, the broker doesn’t retain any information for the client +and discards any previous state from any persistent session. + ===== `ssl` Configuration options for SSL parameters like the certificate, key and the certificate authorities diff --git a/filebeat/docs/modules.asciidoc b/filebeat/docs/modules.asciidoc index efe3f172489..9de23994c65 100644 --- a/filebeat/docs/modules.asciidoc +++ b/filebeat/docs/modules.asciidoc @@ -9,4 +9,6 @@ modules. Filebeat modules require Elasticsearch 5.2 or later. +NOTE: While {filebeat} modules are still supported, we recommend {agent} integrations over {filebeat} modules. Integrations provide a streamlined way to connect data from a variety of vendors to the {stack}. Refer to the https://www.elastic.co/integrations/data-integrations[full list of integrations]. For more information, please refer to the {fleet-guide}/beats-agent-comparison.html[{beats} vs {agent} comparison documentation]. + include::modules_list.asciidoc[] diff --git a/filebeat/docs/troubleshooting.asciidoc b/filebeat/docs/troubleshooting.asciidoc index 0edffce46cf..f16042bc871 100644 --- a/filebeat/docs/troubleshooting.asciidoc +++ b/filebeat/docs/troubleshooting.asciidoc @@ -8,6 +8,7 @@ following tips: * <> * <> +* <> * <> //sets block macro for getting-help.asciidoc included in next section @@ -26,5 +27,15 @@ include::{libbeat-dir}/getting-help.asciidoc[] include::{libbeat-dir}/debugging.asciidoc[] +//sets block macro for metrics-in-logs.asciidoc included in next section +[id="understand-{beatname_lc}-logs"] +[role="xpack"] +== Understand metrics in {beatname_uc} logs + +++++ +Understand logged metrics +++++ + +include::{libbeat-dir}/metrics-in-logs.asciidoc[] diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index 5d45c4e8f52..2de0bc61f56 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -431,7 +431,7 @@ filebeat.inputs: # To fetch all ".log" files from a specific level of subdirectories # /var/log/*/*.log can be used. # For each file found under this path, a harvester is started. - # Make sure not file is defined twice as this can lead to unexpected behaviour. + # Make sure no file is defined twice as this can lead to unexpected behaviour. 
paths: - /var/log/*.log #- c:\programdata\elasticsearch\logs\* @@ -468,7 +468,7 @@ filebeat.inputs: # level: debug # review: 1 - # Set to true to store the additional fields as top level fields instead + # Set to true to store the additional fields as top-level fields instead # of under the "fields" sub-dictionary. In case of name conflicts with the # fields added by Filebeat itself, the custom fields overwrite the default # fields. @@ -482,7 +482,7 @@ filebeat.inputs: # false. #publisher_pipeline.disable_host: false - # Ignore files which were modified more then the defined timespan in the past. + # Ignore files that were modified more than the defined timespan in the past. # ignore_older is disabled by default, so no files are ignored by setting it to 0. # Time strings like 2h (2 hours), 5m (5 minutes) can be used. #ignore_older: 0 @@ -500,7 +500,7 @@ filebeat.inputs: # This is especially useful for multiline log messages which can get large. #max_bytes: 10485760 - # Characters which separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed, + # Characters that separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed, # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator, # null_terminator #line_terminator: auto @@ -523,7 +523,7 @@ filebeat.inputs: #json.keys_under_root: false # If keys_under_root and this setting are enabled, then the values from the decoded # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) # in case of conflicts. #json.overwrite_keys: false @@ -532,7 +532,7 @@ filebeat.inputs: # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`. #json.expand_keys: false - # If this setting is enabled, Filebeat adds a "error.message" and "error.key: json" key in case of JSON + # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON # unmarshaling errors or when a text key is defined in the configuration but cannot # be used. #json.add_error_key: false @@ -545,20 +545,20 @@ filebeat.inputs: # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ #multiline.pattern: ^\[ - # Defines if the pattern set under pattern should be negated or not. Default is false. + # Defines if the pattern set under the pattern setting should be negated or not. Default is false. #multiline.negate: false - # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern + # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern # that was (not) matched before or after or as long as a pattern is not matched based on negate. # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash #multiline.match: after - # The maximum number of lines that are combined to one event. + # The maximum number of lines that are combined into one event. # In case there are more the max_lines the additional lines are discarded. # Default is 500 #multiline.max_lines: 500 - # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event + # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event # Default is 5s.
#multiline.timeout: 5s @@ -568,7 +568,7 @@ filebeat.inputs: # The number of lines to aggregate into a single event. #multiline.count_lines: 3 - # Do not add new line character when concatenating lines. + # Do not add new line characters when concatenating lines. #multiline.skip_newline: false # Setting tail_files to true means filebeat starts reading new files at the end @@ -581,13 +581,13 @@ filebeat.inputs: #pipeline: # If symlinks is enabled, symlinks are opened and harvested. The harvester is opening the - # original for harvesting but will report the symlink name as source. + # original for harvesting but will report the symlink name as the source. #symlinks: false # Backoff values define how aggressively filebeat crawls new files for updates - # The default values can be used in most cases. Backoff defines how long it is waited + # The default values can be used in most cases. Backoff defines how long it has to wait # to check a file again after EOF is reached. Default is 1s which means the file - # is checked every second if new lines were added. This leads to a near real time crawling. + # is checked every second if new lines were added. This leads to a near real-time crawling. # Every time a new line appears, backoff is reset to the initial value. #backoff: 1s @@ -610,7 +610,7 @@ filebeat.inputs: # Close inactive closes the file handler after the predefined period. # The period starts when the last line of the file was, not the file ModTime. - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + # Time strings like 2h (2 hours) and 5m (5 minutes) can be used. #close_inactive: 5m # Close renamed closes a file handler when the file is renamed or rotated. @@ -622,18 +622,18 @@ filebeat.inputs: # after scan_frequency. #close_removed: true - # Closes the file handler as soon as the harvesters reaches the end of the file. + # Closes the file handler as soon as the harvester reaches the end of the file. # By default this option is disabled. # Note: Potential data loss. Make sure to read and understand the docs for this option. #close_eof: false ### State options - # Files for the modification data is older then clean_inactive the state from the registry is removed + # If the modification time of a file is older than clean_inactive, the file's state is removed from the registry # By default this is disabled. #clean_inactive: 0 - # Removes the state for file which cannot be found on disk anymore immediately + # Immediately removes the state for files which can no longer be found on disk #clean_removed: true # Close timeout closes the harvester after the predefined time. @@ -642,7 +642,7 @@ filebeat.inputs: # Note: Potential data loss. Make sure to read and understand the docs for this option. #close_timeout: 0 - # Defines if inputs is enabled + # Defines if the input is enabled #enabled: true #--------------------------- Filestream input ---------------------------- @@ -704,9 +704,22 @@ filebeat.inputs: #prospector.scanner.recursive_glob: true # If symlinks is enabled, symlinks are opened and harvested. The harvester is opening the - # original for harvesting but will report the symlink name as source. + # original for harvesting but will report the symlink name as the source. #prospector.scanner.symlinks: false + # If enabled, instead of relying on the device ID and inode values when comparing files, + # compare hashes of the given byte ranges in files. A file becomes an ingest target + # when its size grows larger than offset+length (see below). Until then it's ignored.
+ #prospector.scanner.fingerprint.enabled: false + + # If fingerprint mode is enabled, sets the offset from the beginning of the file + # for the byte range used for computing the fingerprint value. + #prospector.scanner.fingerprint.offset: 0 + + # If fingerprint mode is enabled, sets the length of the byte range used for + # computing the fingerprint value. Cannot be less than 64 bytes. + #prospector.scanner.fingerprint.length: 1024 + ### Parsers configuration #### JSON configuration @@ -762,12 +775,12 @@ filebeat.inputs: # Defines if the pattern set under the pattern setting should be negated or not. Default is false. #negate: false - # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern + # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern # that was (not) matched before or after or as long as a pattern is not matched based on negate. # Note: After is the equivalent to previous and before is the equivalent to next in Logstash #match: after - # The maximum number of lines that are combined to one event. + # The maximum number of lines that are combined into one event. # In case there are more than max_lines the additional lines are discarded. # Default is 500 #max_lines: 500 @@ -788,16 +801,16 @@ filebeat.inputs: # The number of lines to aggregate into a single event. #count_lines: 3 - # The maximum number of lines that are combined to one event. + # The maximum number of lines that are combined into one event. # In case there are more than max_lines the additional lines are discarded. # Default is 500 #max_lines: 500 - # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event + # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event # Default is 5s. #timeout: 5s - # Do not add new line character when concatenating lines. + # Do not add new line characters when concatenating lines. #skip_newline: false #### Parsing container events @@ -824,11 +837,11 @@ filebeat.inputs: ### State options - # Files for the modification data is older then clean_inactive the state from the registry is removed + # If the modification time of a file is older than clean_inactive, the file's state is removed from the registry # By default this is disabled. #clean_inactive: 0 - # Removes the state for file which cannot be found on disk anymore immediately + # Immediately removes the state for files which can no longer be found on disk #clean_removed: true # Method to determine if two files are the same or not. By default @@ -849,9 +862,9 @@ filebeat.inputs: # false. #publisher_pipeline.disable_host: false - # Ignore files which were modified more then the defined timespan in the past. + # Ignore files that were modified more than the defined timespan in the past. # ignore_older is disabled by default, so no files are ignored by setting it to 0. - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + # Time strings like 2h (2 hours) and 5m (5 minutes) can be used. #ignore_older: 0 # Ignore files that have not been updated since the selected event. @@ -872,7 +885,7 @@ filebeat.inputs: # This is especially useful for multiline log messages which can get large. #message_max_bytes: 10485760 - # Characters which separate the lines. + # Characters that separate the lines.
Valid values: auto, line_feed, vertical_tab, form_feed, # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator, # null_terminator #line_terminator: auto @@ -882,9 +895,9 @@ filebeat.inputs: #pipeline: # Backoff values define how aggressively filebeat crawls new files for updates - # The default values can be used in most cases. Backoff defines how long it is waited + # The default values can be used in most cases. Backoff defines how long it has to wait # to check a file again after EOF is reached. Default is 1s which means the file - # is checked every second if new lines were added. This leads to a near real time crawling. + # is checked every second if new lines were added. This leads to a near real-time crawling. # Every time a new line appears, backoff is reset to the initial value. #backoff.init: 1s @@ -898,7 +911,7 @@ filebeat.inputs: # Close inactive closes the file handler after the predefined period. # The period starts when the last line of the file was, not the file ModTime. - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + # Time strings like 2h (2 hours) and 5m (5 minutes) can be used. #close.on_state_change.inactive: 5m # Close renamed closes a file handler when the file is renamed or rotated. @@ -1213,12 +1226,12 @@ filebeat.inputs: # data path. #filebeat.registry.path: ${path.data}/registry -# The permissions mask to apply on registry data, and meta files. The default +# The permissions mask to apply on registry data and meta files. The default # value is 0600. Must be a valid Unix-style file permissions mask expressed in # octal notation. This option is not supported on Windows. #filebeat.registry.file_permissions: 0600 -# The timeout value that controls when registry entries are written to disk +# The timeout value that controls when registry entries are written to the disk # (flushed). When an unwritten update exceeds this value, it triggers a write # to disk. When flush is set to 0s, the registry is written to disk after each # batch of events has been published successfully. The default value is 1s. @@ -1234,7 +1247,7 @@ filebeat.inputs: # By default Ingest pipelines are not updated if a pipeline with the same ID # already exists. If this option is enabled Filebeat overwrites pipelines -# everytime a new Elasticsearch connection is established. +# every time a new Elasticsearch connection is established. #filebeat.overwrite_pipelines: false # How long filebeat waits on shutdown for the publisher to finish. @@ -1259,10 +1272,10 @@ filebeat.inputs: # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. +# If this option is not defined, the hostname is used. #name: # The tags of the shipper are included in their own field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] @@ -1274,7 +1287,7 @@ filebeat.inputs: # env: staging # If this option is set to true, the custom fields are stored as top-level # fields in the output document instead of being grouped under a fields # sub-dictionary. Default is false.
#fields_under_root: false @@ -1286,7 +1299,7 @@ filebeat.inputs: #queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs - # bulk_max_size) to the output, the moment the output is ready to server + # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events. #mem: # Max number of events the queue can buffer. @@ -1338,7 +1351,7 @@ filebeat.inputs: # length of its retry interval each time, up to this maximum. #max_retry_interval: 30s -# Sets the maximum number of CPUs that can be executing simultaneously. The +# Sets the maximum number of CPUs that can execute simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: @@ -1459,7 +1472,7 @@ filebeat.inputs: # ignore_missing: false # fail_on_error: true # -# The following example copies the value of message to message_copied +# The following example copies the value of the message to message_copied # #processors: # - copy_fields: @@ -1469,7 +1482,7 @@ filebeat.inputs: # fail_on_error: true # ignore_missing: false # -# The following example truncates the value of message to 1024 bytes +# The following example truncates the value of the message to 1024 bytes # #processors: # - truncate_fields: @@ -1566,7 +1579,7 @@ output.elasticsearch: # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. #index: "filebeat-%{[agent.version]}" - # Optional ingest pipeline. By default no pipeline will be used. + # Optional ingest pipeline. By default, no pipeline will be used. #pipeline: "" # Optional HTTP path @@ -2297,14 +2310,14 @@ output.elasticsearch: # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag or the `setup` command. +# options here or by using the `-setup` CLI flag or the `setup` command. #setup.dashboards.enabled: false # The directory from where to read the dashboards. The default is the `kibana` # folder in the home path. #setup.dashboards.directory: ${path.home}/kibana -# The URL from where to download the dashboards archive. It is used instead of +# The URL from where to download the dashboard archive. It is used instead of # the directory if it has a value. #setup.dashboards.url: @@ -2401,7 +2414,7 @@ setup.template.settings: # Configure index lifecycle management (ILM) to manage the backing indices # of your data streams. -# Enable ILM support. Valid values are true, false. +# Enable ILM support. Valid values are true or false. #setup.ilm.enabled: true # Set the lifecycle policy name. The default policy name is @@ -2556,25 +2569,25 @@ logging.files: # The name of the files where the logs are written to. #name: filebeat - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated + # Configure log file size limit. If the limit is reached, the log file will be + # automatically rotated. #rotateeverybytes: 10485760 # = 10MB - # Number of rotated log files to keep. Oldest files will be deleted first. + # Number of rotated log files to keep. The oldest files will be deleted first. #keepfiles: 7 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. #permissions: 0600 # Enable log file rotation on time intervals in addition to size-based rotation.
# Enable log file rotation on time intervals in addition to size-based rotation. # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h # are boundary-aligned with minutes, hours, days, weeks, months, and years as # reported by the local system clock. All other intervals are calculated from the # Unix epoch. Defaults to disabled. #interval: 0 - # Rotate existing logs on startup rather than appending to the existing + # Rotate existing logs on startup rather than appending them to the existing # file. Defaults to true. # rotateonstartup: true @@ -2602,7 +2615,7 @@ logging.files: # Array of hosts to connect to. # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # In case you specify an additional path, the scheme is required: http://localhost:9200/path # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 #hosts: ["localhost:9200"] @@ -2649,7 +2662,7 @@ logging.files: # Elasticsearch after a network error. The default is 60s. #backoff.max: 60s - # Configure HTTP request timeout before failing an request to Elasticsearch. + # Configure HTTP request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -2746,15 +2759,15 @@ logging.files: # =============================== HTTP Endpoint ================================ -# Each beat can expose internal metrics through a HTTP endpoint. For security +# Each beat can expose internal metrics through an HTTP endpoint. For security # reasons the endpoint is disabled by default. This feature is currently experimental. -# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output # append ?pretty to the URL. # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe. # When using IP addresses, it is recommended to only use localhost. #http.host: localhost @@ -2764,7 +2777,7 @@ logging.files: # Define which user should be owning the named pipe. #http.named_pipe.user: -# Define which the permissions that should be applied to the named pipe, use the Security +# Define which permissions should be applied to the named pipe; use the Security # Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with # `http.user`. #http.named_pipe.security_descriptor: diff --git a/filebeat/filebeat.yml b/filebeat/filebeat.yml index 4e6dab043bb..d4080e9cccb 100644 --- a/filebeat/filebeat.yml +++ b/filebeat/filebeat.yml @@ -16,7 +16,7 @@ filebeat.inputs: # Each - is an input. Most options can be set at the input level, so # you can use different inputs for various configurations. -# Below are the input specific configurations. +# Below are the input-specific configurations. # filestream is an input for collecting log messages from files. - type: filestream @@ -80,7 +80,7 @@ setup.template.settings: # all the transactions sent by a single shipper in the web interface. #name: # The tags of the shipper are included in their own field with each # transaction published.
#tags: ["service-X", "web-tier"] @@ -95,8 +95,8 @@ setup.template.settings: # options here or by using the `setup` command. #setup.dashboards.enabled: false -# The URL from where to download the dashboards archive. By default this URL -# has a value which is computed based on the Beat name and version. For released +# The URL from where to download the dashboard archive. By default, this URL +# has a value that is computed based on the Beat name and version. For released # versions, this URL points to the dashboard archive on the artifacts.elastic.co # website. #setup.dashboards.url: @@ -178,7 +178,7 @@ processors: #logging.level: debug # At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", +# To enable all selectors, use ["*"]. Examples of other selectors are "beat", # "publisher", "service". #logging.selectors: ["*"] @@ -196,7 +196,7 @@ processors: #monitoring.cluster_uuid: # Uncomment to send the metrics to Elasticsearch. Most settings from the -# Elasticsearch output are accepted here as well. +# Elasticsearch outputs are accepted here as well. # Note that the settings should point to your Elasticsearch *monitoring* cluster. # Any setting that is not set is automatically inherited from the Elasticsearch # output configuration, so if you have the Elasticsearch output configured such diff --git a/filebeat/input/filestream/config.go b/filebeat/input/filestream/config.go index a9ca55d8eb2..f6e1ca03f4c 100644 --- a/filebeat/input/filestream/config.go +++ b/filebeat/input/filestream/config.go @@ -33,6 +33,7 @@ import ( type config struct { Reader readerConfig `config:",inline"` + ID string `config:"id"` Paths []string `config:"paths"` Close closerConfig `config:"close"` FileWatcher *conf.Namespace `config:"prospector"` diff --git a/filebeat/input/filestream/copytruncate_prospector.go b/filebeat/input/filestream/copytruncate_prospector.go index 1f12f167a4c..10884cb9b94 100644 --- a/filebeat/input/filestream/copytruncate_prospector.go +++ b/filebeat/input/filestream/copytruncate_prospector.go @@ -329,7 +329,9 @@ func (p *copyTruncateFileProspector) onRotatedFile( hg.Start(ctx, src) return } - originalSrc := p.identifier.GetSource(loginp.FSEvent{NewPath: originalPath, Info: fi}) + descCopy := fe.Descriptor + descCopy.Info = fi + originalSrc := p.identifier.GetSource(loginp.FSEvent{NewPath: originalPath, Descriptor: descCopy}) p.rotatedFiles.addOriginalFile(originalPath, originalSrc) p.rotatedFiles.addRotatedFile(originalPath, fe.NewPath, src) hg.Start(ctx, src) diff --git a/filebeat/input/filestream/environment_test.go b/filebeat/input/filestream/environment_test.go index 2c121fa8c4a..cc53d23a214 100644 --- a/filebeat/input/filestream/environment_test.go +++ b/filebeat/input/filestream/environment_test.go @@ -374,7 +374,7 @@ func (e *inputTestingEnvironment) getRegistryState(key string) (registryEntry, e func getIDFromPath(filepath, inputID string, fi os.FileInfo) string { identifier, _ := newINodeDeviceIdentifier(nil) - src := identifier.GetSource(loginp.FSEvent{Info: fi, Op: loginp.OpCreate, NewPath: filepath}) + src := identifier.GetSource(loginp.FSEvent{Descriptor: loginp.FileDescriptor{Info: fi}, Op: loginp.OpCreate, NewPath: filepath}) return "filestream::" + inputID + "::" + src.Name() } diff --git a/filebeat/input/filestream/fswatch.go b/filebeat/input/filestream/fswatch.go index 68ff28f47dd..ab166cefbe0 100644 --- a/filebeat/input/filestream/fswatch.go +++ 
b/filebeat/input/filestream/fswatch.go @@ -18,7 +18,11 @@ package filestream import ( + "bufio" + "crypto/sha256" + "encoding/hex" "fmt" + "io" "os" "path/filepath" "time" @@ -28,35 +32,18 @@ import ( "github.com/elastic/beats/v7/filebeat/input/file" loginp "github.com/elastic/beats/v7/filebeat/input/filestream/internal/input-logfile" - file_helper "github.com/elastic/beats/v7/libbeat/common/file" "github.com/elastic/beats/v7/libbeat/common/match" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" ) const ( - RecursiveGlobDepth = 8 - scannerName = "scanner" - watcherDebugKey = "file_watcher" + RecursiveGlobDepth = 8 + DefaultFingerprintSize int64 = 1024 // 1KB + scannerDebugKey = "scanner" + watcherDebugKey = "file_watcher" ) -var watcherFactories = map[string]watcherFactory{ - scannerName: newScannerWatcher, -} - -type watcherFactory func(paths []string, cfg *conf.C) (loginp.FSWatcher, error) - -// fileScanner looks for files which match the patterns in paths. -// It is able to exclude files and symlinks. -type fileScanner struct { - paths []string - excludedFiles []match.Matcher - includedFiles []match.Matcher - symlinks bool - - log *logp.Logger -} - type fileWatcherConfig struct { // Interval is the time between two scans. Interval time.Duration `config:"check_interval"` @@ -70,27 +57,22 @@ type fileWatcherConfig struct { // fileWatcher gets the list of files from a FSWatcher and creates events by // comparing the files between its last two runs. type fileWatcher struct { - interval time.Duration - resendOnModTime bool - prev map[string]os.FileInfo - scanner loginp.FSScanner - log *logp.Logger - events chan loginp.FSEvent - sameFileFunc func(os.FileInfo, os.FileInfo) bool + cfg fileWatcherConfig + prev map[string]loginp.FileDescriptor + scanner loginp.FSScanner + log *logp.Logger + events chan loginp.FSEvent } func newFileWatcher(paths []string, ns *conf.Namespace) (loginp.FSWatcher, error) { + var config *conf.C if ns == nil { - return newScannerWatcher(paths, conf.NewConfig()) + config = conf.NewConfig() + } else { + config = ns.Config() } - watcherType := ns.Name() - f, ok := watcherFactories[watcherType] - if !ok { - return nil, fmt.Errorf("no such file watcher: %s", watcherType) - } - - return f(paths, ns.Config()) + return newScannerWatcher(paths, config) } func newScannerWatcher(paths []string, c *conf.C) (loginp.FSWatcher, error) { @@ -104,13 +86,11 @@ func newScannerWatcher(paths []string, c *conf.C) (loginp.FSWatcher, error) { return nil, err } return &fileWatcher{ - log: logp.NewLogger(watcherDebugKey), - interval: config.Interval, - resendOnModTime: config.ResendOnModTime, - prev: make(map[string]os.FileInfo, 0), - scanner: scanner, - events: make(chan loginp.FSEvent), - sameFileFunc: os.SameFile, + log: logp.NewLogger(watcherDebugKey), + cfg: config, + prev: make(map[string]loginp.FileDescriptor, 0), + scanner: scanner, + events: make(chan loginp.FSEvent), }, nil } @@ -128,7 +108,7 @@ func (w *fileWatcher) Run(ctx unison.Canceler) { // run initial scan before starting regular w.watch(ctx) - _ = timed.Periodic(ctx, w.interval, func() error { + _ = timed.Periodic(ctx, w.cfg.Interval, func() error { w.watch(ctx) return nil @@ -140,140 +120,198 @@ func (w *fileWatcher) watch(ctx unison.Canceler) { paths := w.scanner.GetFiles() - newFiles := make(map[string]os.FileInfo) + // for debugging purposes + writtenCount := 0 + truncatedCount := 0 + renamedCount := 0 + removedCount := 0 + createdCount := 0 - for path, info := range paths { + 
newFilesByName := make(map[string]*loginp.FileDescriptor) + newFilesByID := make(map[string]*loginp.FileDescriptor) + for path, fd := range paths { // if the scanner found a new path or an existing path // with a different file, it is a new file - prevInfo, ok := w.prev[path] - if !ok || !w.sameFileFunc(prevInfo, info) { - newFiles[path] = info + prevDesc, ok := w.prev[path] + sfd := fd // to avoid memory aliasing + if !ok || !loginp.SameFile(&prevDesc, &sfd) { + newFilesByName[path] = &sfd + newFilesByID[fd.FileID()] = &sfd continue } - // if the two infos belong to the same file and it has been modified - // if the size is smaller than before, it is truncated, if bigger, it is a write event. - // It might happen that a file is truncated and then more data is added, both - // within the same second, this will make the reader stop, but a new one will not - // start because the modification data is the same, to avoid this situation, - // we also check for size changes here. - if prevInfo.ModTime() != info.ModTime() || prevInfo.Size() != info.Size() { - if prevInfo.Size() > info.Size() || w.resendOnModTime && prevInfo.Size() == info.Size() { - select { - case <-ctx.Done(): - return - case w.events <- truncateEvent(path, info): - } - } else { - select { - case <-ctx.Done(): - return - case w.events <- writeEvent(path, info): - } + var e loginp.FSEvent + switch { + + // the new size is smaller, the file was truncated + case prevDesc.Info.Size() > fd.Info.Size(): + e = truncateEvent(path, fd) + truncatedCount++ + + // the size is the same, timestamps are different, the file was touched + case prevDesc.Info.Size() == fd.Info.Size() && prevDesc.Info.ModTime() != fd.Info.ModTime(): + if w.cfg.ResendOnModTime { + e = truncateEvent(path, fd) + truncatedCount++ } + + // the new size is larger, something was written + case prevDesc.Info.Size() < fd.Info.Size(): + e = writeEvent(path, fd) + writtenCount++ } - // delete from previous state, as we have more up to date info + // if none of the conditions were true, the file remained unchanged and we don't need to create an event + if e.Op != loginp.OpDone { + select { + case <-ctx.Done(): + return + case w.events <- e: + } + } + + // delete from previous state to mark that we've seen the existing file again delete(w.prev, path) } - // remaining files are in the prev map are the ones that are missing + // remaining files in the prev map are the ones that are missing // either because they have been deleted or renamed - for removedPath, removedInfo := range w.prev { - for newPath, newInfo := range newFiles { - if w.sameFileFunc(removedInfo, newInfo) { - select { - case <-ctx.Done(): - return - case w.events <- renamedEvent(removedPath, newPath, newInfo): - delete(newFiles, newPath) - goto CHECK_NEXT_REMOVED - } - } + for remainingPath, remainingDesc := range w.prev { + var e loginp.FSEvent + + id := remainingDesc.FileID() + if newDesc, renamed := newFilesByID[id]; renamed { + e = renamedEvent(remainingPath, newDesc.Filename, *newDesc) + delete(newFilesByName, newDesc.Filename) + delete(newFilesByID, id) + renamedCount++ + } else { + e = deleteEvent(remainingPath, remainingDesc) + removedCount++ } - select { case <-ctx.Done(): return - case w.events <- deleteEvent(removedPath, removedInfo): + case w.events <- e: } - CHECK_NEXT_REMOVED: } - // remaining files in newFiles are new - for path, info := range newFiles { + // remaining files in newFilesByName are newly created files + for path, fd := range newFilesByName { + // no need to react to empty new files + if
fd.Info.Size() == 0 { + w.log.Warnf("file %q has no content yet, skipping", fd.Filename) + delete(paths, path) + continue + } select { case <-ctx.Done(): return - case w.events <- createEvent(path, info): + case w.events <- createEvent(path, *fd): + createdCount++ } } - w.log.Debugf("Found %d paths", len(paths)) + w.log.With( + "total", len(paths), + "written", writtenCount, + "truncated", truncatedCount, + "renamed", renamedCount, + "removed", removedCount, + "created", createdCount, + ).Debugf("File scan complete") + w.prev = paths } -func createEvent(path string, fi os.FileInfo) loginp.FSEvent { - return loginp.FSEvent{Op: loginp.OpCreate, OldPath: "", NewPath: path, Info: fi} +func createEvent(path string, fd loginp.FileDescriptor) loginp.FSEvent { + return loginp.FSEvent{Op: loginp.OpCreate, OldPath: "", NewPath: path, Descriptor: fd} } -func writeEvent(path string, fi os.FileInfo) loginp.FSEvent { - return loginp.FSEvent{Op: loginp.OpWrite, OldPath: path, NewPath: path, Info: fi} +func writeEvent(path string, fd loginp.FileDescriptor) loginp.FSEvent { + return loginp.FSEvent{Op: loginp.OpWrite, OldPath: path, NewPath: path, Descriptor: fd} } -func truncateEvent(path string, fi os.FileInfo) loginp.FSEvent { - return loginp.FSEvent{Op: loginp.OpTruncate, OldPath: path, NewPath: path, Info: fi} +func truncateEvent(path string, fd loginp.FileDescriptor) loginp.FSEvent { + return loginp.FSEvent{Op: loginp.OpTruncate, OldPath: path, NewPath: path, Descriptor: fd} } -func renamedEvent(oldPath, path string, fi os.FileInfo) loginp.FSEvent { - return loginp.FSEvent{Op: loginp.OpRename, OldPath: oldPath, NewPath: path, Info: fi} +func renamedEvent(oldPath, path string, fd loginp.FileDescriptor) loginp.FSEvent { + return loginp.FSEvent{Op: loginp.OpRename, OldPath: oldPath, NewPath: path, Descriptor: fd} } -func deleteEvent(path string, fi os.FileInfo) loginp.FSEvent { - return loginp.FSEvent{Op: loginp.OpDelete, OldPath: path, NewPath: "", Info: fi} +func deleteEvent(path string, fd loginp.FileDescriptor) loginp.FSEvent { + return loginp.FSEvent{Op: loginp.OpDelete, OldPath: path, NewPath: "", Descriptor: fd} } func (w *fileWatcher) Event() loginp.FSEvent { return <-w.events } -func (w *fileWatcher) GetFiles() map[string]os.FileInfo { +func (w *fileWatcher) GetFiles() map[string]loginp.FileDescriptor { return w.scanner.GetFiles() } +type fingerprintConfig struct { + Enabled bool `config:"enabled"` + Offset int64 `config:"offset"` + Length int64 `config:"length"` +} + type fileScannerConfig struct { - ExcludedFiles []match.Matcher `config:"exclude_files"` - IncludedFiles []match.Matcher `config:"include_files"` - Symlinks bool `config:"symlinks"` - RecursiveGlob bool `config:"recursive_glob"` + ExcludedFiles []match.Matcher `config:"exclude_files"` + IncludedFiles []match.Matcher `config:"include_files"` + Symlinks bool `config:"symlinks"` + RecursiveGlob bool `config:"recursive_glob"` + Fingerprint fingerprintConfig `config:"fingerprint"` } func defaultFileScannerConfig() fileScannerConfig { return fileScannerConfig{ Symlinks: false, RecursiveGlob: true, + Fingerprint: fingerprintConfig{ + Enabled: false, + Offset: 0, + Length: DefaultFingerprintSize, + }, } } -func newFileScanner(paths []string, cfg fileScannerConfig) (loginp.FSScanner, error) { - fs := fileScanner{ - paths: paths, - excludedFiles: cfg.ExcludedFiles, - includedFiles: cfg.IncludedFiles, - symlinks: cfg.Symlinks, - log: logp.NewLogger(scannerName), +// fileScanner looks for files which match the patterns in paths. 
+// It is able to exclude files and symlinks. +type fileScanner struct { + paths []string + cfg fileScannerConfig + log *logp.Logger +} + +func newFileScanner(paths []string, config fileScannerConfig) (loginp.FSScanner, error) { + s := fileScanner{ + paths: paths, + cfg: config, + log: logp.NewLogger(scannerDebugKey), + } + + if s.cfg.Fingerprint.Enabled { + if s.cfg.Fingerprint.Length < sha256.BlockSize { + err := fmt.Errorf("fingerprint size %d bytes cannot be smaller than %d bytes", config.Fingerprint.Length, sha256.BlockSize) + return nil, fmt.Errorf("error while reading configuration of fingerprint: %w", err) + } + s.log.Debugf("fingerprint mode enabled: offset %d, length %d", s.cfg.Fingerprint.Offset, s.cfg.Fingerprint.Length) } - err := fs.resolveRecursiveGlobs(cfg) + + err := s.resolveRecursiveGlobs(config) if err != nil { return nil, err } - err = fs.normalizeGlobPatterns() + err = s.normalizeGlobPatterns() if err != nil { return nil, err } - return &fs, nil + return &s, nil } // resolveRecursiveGlobs expands `**` from the globs in multiple patterns @@ -313,11 +351,12 @@ func (s *fileScanner) normalizeGlobPatterns() error { return nil } -// GetFiles returns a map of files and fileinfos which +// GetFiles returns a map of file descriptors by filenames that // match the configured paths. -func (s *fileScanner) GetFiles() map[string]os.FileInfo { - pathInfo := map[string]os.FileInfo{} - uniqFileID := map[string]os.FileInfo{} +func (s *fileScanner) GetFiles() map[string]loginp.FileDescriptor { + fdByName := map[string]loginp.FileDescriptor{} + // used to determine if a symlink resolves to an already known target + uniqueIDs := map[string]string{} for _, path := range s.paths { matches, err := filepath.Glob(path) @@ -326,93 +365,137 @@ func (s *fileScanner) GetFiles() map[string]os.FileInfo { continue } - for _, file := range matches { - if s.shouldSkipFile(file) { + for _, filename := range matches { + it, err := s.getIngestTarget(filename) + if err != nil { + s.log.Debugf("cannot create an ingest target for file %q: %s", filename, err) continue } - // If symlink is enabled, it is checked that original is not part of same input - // If original is harvested by other input, states will potentially overwrite each other - if s.isOriginalAndSymlinkConfigured(file, uniqFileID) { + fd, err := s.toFileDescriptor(&it) + if err != nil { + s.log.Warnf("cannot create a file descriptor for an ingest target %q: %s", filename, err) continue } - fileInfo, err := os.Stat(file) - if err != nil { - s.log.Debug("stat(%s) failed: %s", file, err) + fileID := fd.FileID() + if knownFilename, exists := uniqueIDs[fileID]; exists { + s.log.Warnf("%q points to an already known ingest target %q [%s==%s].
Skipping", fd.Filename, knownFilename, fileID, fileID) continue } - pathInfo[file] = fileInfo + uniqueIDs[fileID] = fd.Filename + fdByName[filename] = fd } } - return pathInfo + return fdByName } -func (s *fileScanner) shouldSkipFile(file string) bool { - if s.isFileExcluded(file) || !s.isFileIncluded(file) { - s.log.Debugf("Exclude file: %s", file) - return true - } +type ingestTarget struct { + filename string + originalFilename string + symlink bool + info os.FileInfo +} - fileInfo, err := os.Lstat(file) - if err != nil { - s.log.Debugf("lstat(%s) failed: %s", file, err) - return true +func (s *fileScanner) getIngestTarget(filename string) (it ingestTarget, err error) { + if s.isFileExcluded(filename) { + return it, fmt.Errorf("file %q is excluded from ingestion", filename) } - if fileInfo.IsDir() { - s.log.Debugf("Skipping directory: %s", file) - return true + if !s.isFileIncluded(filename) { + return it, fmt.Errorf("file %q is not included in ingestion", filename) } - isSymlink := fileInfo.Mode()&os.ModeSymlink > 0 - if isSymlink && !s.symlinks { - s.log.Debugf("File %s skipped as it is a symlink", file) - return true - } + it.filename = filename + it.originalFilename = filename - originalFile, err := filepath.EvalSymlinks(file) + it.info, err = os.Lstat(it.filename) // to determine if it's a symlink if err != nil { - s.log.Debugf("finding path to original file has failed %s: %+v", file, err) - return true + return it, fmt.Errorf("failed to lstat %q: %w", it.filename, err) } - // Check if original file is included to make sure we are not reading from - // unwanted files. - if s.isFileExcluded(originalFile) || !s.isFileIncluded(originalFile) { - s.log.Debugf("Exclude original file: %s", file) - return true + + if it.info.IsDir() { + return it, fmt.Errorf("file %q is a directory", it.filename) } - return false + it.symlink = it.info.Mode()&os.ModeSymlink > 0 + + if it.symlink { + if !s.cfg.Symlinks { + return it, fmt.Errorf("file %q is a symlink and they're disabled", it.filename) + } + + // now we know it's a symlink, we stat with link resolution + it.info, err = os.Stat(it.filename) + if err != nil { + return it, fmt.Errorf("failed to stat the symlink %q: %w", it.filename, err) + } + + it.originalFilename, err = filepath.EvalSymlinks(it.filename) + if err != nil { + return it, fmt.Errorf("failed to resolve the symlink %q: %w", it.filename, err) + } + + if s.isFileExcluded(it.originalFilename) { + return it, fmt.Errorf("file %q->%q is excluded from ingestion", it.filename, it.originalFilename) + } + + if !s.isFileIncluded(it.originalFilename) { + return it, fmt.Errorf("file %q->%q is not included in ingestion", it.filename, it.originalFilename) + } + } + + return it, nil } -func (s *fileScanner) isOriginalAndSymlinkConfigured(file string, uniqFileID map[string]os.FileInfo) bool { - if s.symlinks { - fileInfo, err := os.Stat(file) +func (s *fileScanner) toFileDescriptor(it *ingestTarget) (fd loginp.FileDescriptor, err error) { + fd.Filename = it.filename + fd.Info = it.info + + if s.cfg.Fingerprint.Enabled { + fileSize := it.info.Size() + minSize := s.cfg.Fingerprint.Offset + s.cfg.Fingerprint.Length + if fileSize < minSize { + return fd, fmt.Errorf("filesize of %q is %d bytes, expected at least %d bytes for fingerprinting", fd.Filename, fileSize, minSize) + } + + file, err := os.Open(it.originalFilename) if err != nil { - s.log.Debugf("stat(%s) failed: %s", file, err) - return false + return fd, fmt.Errorf("failed to open %q for fingerprinting: %w", it.originalFilename, err) } - fileID 
:= file_helper.GetOSState(fileInfo).String() - if finfo, exists := uniqFileID[fileID]; exists { - s.log.Infof("Same file found as symlink and original. Skipping file: %s (as it same as %s)", file, finfo.Name()) - return true + defer file.Close() + + if s.cfg.Fingerprint.Offset != 0 { + _, err = file.Seek(s.cfg.Fingerprint.Offset, io.SeekStart) + if err != nil { + return fd, fmt.Errorf("failed to seek %q for fingerprinting: %w", fd.Filename, err) + } } - uniqFileID[fileID] = fileInfo + + bfile := bufio.NewReaderSize(file, int(s.cfg.Fingerprint.Length)) + r := io.LimitReader(bfile, s.cfg.Fingerprint.Length) + h := sha256.New() + written, err := io.Copy(h, r) + if err != nil { + return fd, fmt.Errorf("failed to compute hash for first %d bytes of %q: %w", s.cfg.Fingerprint.Length, fd.Filename, err) + } + if written != s.cfg.Fingerprint.Length { + return fd, fmt.Errorf("failed to read %d bytes from %q to compute fingerprint, read only %d", s.cfg.Fingerprint.Length, fd.Filename, written) + } + + fd.Fingerprint = hex.EncodeToString(h.Sum(nil)) } - return false + + return fd, nil } func (s *fileScanner) isFileExcluded(file string) bool { - return len(s.excludedFiles) > 0 && s.matchAny(s.excludedFiles, file) + return len(s.cfg.ExcludedFiles) > 0 && s.matchAny(s.cfg.ExcludedFiles, file) } func (s *fileScanner) isFileIncluded(file string) bool { - if len(s.includedFiles) == 0 { - return true - } - return s.matchAny(s.includedFiles, file) + return len(s.cfg.IncludedFiles) == 0 || s.matchAny(s.cfg.IncludedFiles, file) } // matchAny checks if the text matches any of the regular expressions diff --git a/filebeat/input/filestream/fswatch_test.go b/filebeat/input/filestream/fswatch_test.go index da656a1ca0f..d798cbaa752 100644 --- a/filebeat/input/filestream/fswatch_test.go +++ b/filebeat/input/filestream/fswatch_test.go @@ -19,301 +19,879 @@ package filestream import ( "context" - "io/ioutil" + "fmt" "os" "path/filepath" + "strings" "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" loginp "github.com/elastic/beats/v7/filebeat/input/filestream/internal/input-logfile" - "github.com/elastic/beats/v7/libbeat/common/match" + conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" ) -var ( - excludedFileName = "excluded_file" - includedFileName = "included_file" - directoryPath = "unharvestable_dir" -) +func TestFileWatcher(t *testing.T) { + dir := t.TempDir() + paths := []string{filepath.Join(dir, "*.log")} + cfgStr := ` +scanner: + check_interval: 100ms + resend_on_touch: true + symlinks: false + recursive_glob: true + fingerprint: + enabled: false + offset: 0 + length: 1024 +` -func TestFileScanner(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "fswatch_test_file_scanner") - if err != nil { - t.Fatalf("cannot create temporary test dir: %v", err) - } - defer os.RemoveAll(tmpDir) - setupFilesForScannerTest(t, tmpDir) - - excludedFilePath := filepath.Join(tmpDir, excludedFileName) - includedFilePath := filepath.Join(tmpDir, includedFileName) - - testCases := map[string]struct { - paths []string - excludedFiles []match.Matcher - includedFiles []match.Matcher - symlinks bool - expectedFiles []string - }{ - "select all files": { - paths: []string{excludedFilePath, includedFilePath}, - expectedFiles: []string{excludedFilePath, includedFilePath}, - }, - "skip excluded files": { - paths: []string{excludedFilePath, includedFilePath}, - excludedFiles: []match.Matcher{ - match.MustCompile(excludedFileName), + ctx, cancel :=
context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + fw := createWatcherWithConfig(t, paths, cfgStr) + + go fw.Run(ctx) + + t.Run("detects a new file", func(t *testing.T) { + basename := "created.log" + filename := filepath.Join(dir, basename) + err := os.WriteFile(filename, []byte("hello"), 0777) + require.NoError(t, err) + + e := fw.Event() + expEvent := loginp.FSEvent{ + NewPath: filename, + Op: loginp.OpCreate, + Descriptor: loginp.FileDescriptor{ + Filename: filename, + Info: testFileInfo{name: basename, size: 5}, // 5 bytes written }, - expectedFiles: []string{includedFilePath}, - }, - "only include included_files": { - paths: []string{excludedFilePath, includedFilePath}, - includedFiles: []match.Matcher{ - match.MustCompile(includedFileName), + } + requireEqualEvents(t, expEvent, e) + }) + + t.Run("detects a file write", func(t *testing.T) { + basename := "created.log" + filename := filepath.Join(dir, basename) + + f, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0777) + require.NoError(t, err) + _, err = f.WriteString("world") + require.NoError(t, err) + f.Close() + + e := fw.Event() + expEvent := loginp.FSEvent{ + NewPath: filename, + OldPath: filename, + Op: loginp.OpWrite, + Descriptor: loginp.FileDescriptor{ + Filename: filename, + Info: testFileInfo{name: basename, size: 10}, // +5 bytes appended }, - expectedFiles: []string{includedFilePath}, - }, - "skip directories": { - paths: []string{filepath.Join(tmpDir, directoryPath)}, - expectedFiles: []string{}, - }, - } + } + requireEqualEvents(t, expEvent, e) + }) - for name, test := range testCases { - test := test + t.Run("detects a file rename", func(t *testing.T) { + basename := "created.log" + filename := filepath.Join(dir, basename) + newBasename := "renamed.log" + newFilename := filepath.Join(dir, newBasename) - t.Run(name, func(t *testing.T) { - cfg := fileScannerConfig{ - ExcludedFiles: test.excludedFiles, - IncludedFiles: test.includedFiles, - Symlinks: test.symlinks, - RecursiveGlob: false, - } - fs, err := newFileScanner(test.paths, cfg) - if err != nil { - t.Fatal(err) - } - files := fs.GetFiles() - paths := make([]string, 0) - for p := range files { - paths = append(paths, p) + err := os.Rename(filename, newFilename) + require.NoError(t, err) + + e := fw.Event() + expEvent := loginp.FSEvent{ + NewPath: newFilename, + OldPath: filename, + Op: loginp.OpRename, + Descriptor: loginp.FileDescriptor{ + Filename: newFilename, + Info: testFileInfo{name: newBasename, size: 10}, + }, + } + requireEqualEvents(t, expEvent, e) + }) + + t.Run("detects a file truncate", func(t *testing.T) { + basename := "renamed.log" + filename := filepath.Join(dir, basename) + + err := os.Truncate(filename, 2) + require.NoError(t, err) + + e := fw.Event() + expEvent := loginp.FSEvent{ + NewPath: filename, + OldPath: filename, + Op: loginp.OpTruncate, + Descriptor: loginp.FileDescriptor{ + Filename: filename, + Info: testFileInfo{name: basename, size: 2}, + }, + } + requireEqualEvents(t, expEvent, e) + }) + + t.Run("emits truncate on touch when resend_on_touch is enabled", func(t *testing.T) { + basename := "renamed.log" + filename := filepath.Join(dir, basename) + time := time.Now().Local().Add(time.Hour) + err := os.Chtimes(filename, time, time) + require.NoError(t, err) + + e := fw.Event() + expEvent := loginp.FSEvent{ + NewPath: filename, + OldPath: filename, + Op: loginp.OpTruncate, + Descriptor: loginp.FileDescriptor{ + Filename: filename, + Info: testFileInfo{name: basename, size: 2}, + }, + } + 
requireEqualEvents(t, expEvent, e) + }) + + t.Run("detects a file remove", func(t *testing.T) { + basename := "renamed.log" + filename := filepath.Join(dir, basename) + + err := os.Remove(filename) + require.NoError(t, err) + + e := fw.Event() + expEvent := loginp.FSEvent{ + OldPath: filename, + Op: loginp.OpDelete, + Descriptor: loginp.FileDescriptor{ + Filename: filename, + Info: testFileInfo{name: basename, size: 2}, + }, + } + requireEqualEvents(t, expEvent, e) + }) + + t.Run("propagates a fingerprint for a new file", func(t *testing.T) { + dir := t.TempDir() + paths := []string{filepath.Join(dir, "*.log")} + cfgStr := ` +scanner: + check_interval: 100ms + symlinks: false + recursive_glob: true + fingerprint: + enabled: true + offset: 0 + length: 1024 +` + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + fw := createWatcherWithConfig(t, paths, cfgStr) + go fw.Run(ctx) + + basename := "created.log" + filename := filepath.Join(dir, basename) + err := os.WriteFile(filename, []byte(strings.Repeat("a", 1024)), 0777) + require.NoError(t, err) + + e := fw.Event() + expEvent := loginp.FSEvent{ + NewPath: filename, + Op: loginp.OpCreate, + Descriptor: loginp.FileDescriptor{ + Filename: filename, + Fingerprint: "2edc986847e209b4016e141a6dc8716d3207350f416969382d431539bf292e4a", + Info: testFileInfo{name: basename, size: 1024}, + }, + } + requireEqualEvents(t, expEvent, e) + }) + + t.Run("does not emit events if a file is touched and resend_on_touch is disabled", func(t *testing.T) { + dir := t.TempDir() + paths := []string{filepath.Join(dir, "*.log")} + cfgStr := ` +scanner: + check_interval: 10ms +` + + ctx, cancel := context.WithTimeout(context.Background(), 1000*time.Millisecond) + defer cancel() + + fw := createWatcherWithConfig(t, paths, cfgStr) + go fw.Run(ctx) + + basename := "created.log" + filename := filepath.Join(dir, basename) + err := os.WriteFile(filename, []byte(strings.Repeat("a", 1024)), 0777) + require.NoError(t, err) + + e := fw.Event() + expEvent := loginp.FSEvent{ + NewPath: filename, + Op: loginp.OpCreate, + Descriptor: loginp.FileDescriptor{ + Filename: filename, + Info: testFileInfo{name: basename, size: 1024}, + }, + } + requireEqualEvents(t, expEvent, e) + + time := time.Now().Local().Add(time.Hour) + err = os.Chtimes(filename, time, time) + require.NoError(t, err) + + e = fw.Event() + require.Equal(t, loginp.OpDone, e.Op) + }) + + t.Run("does not emit events for empty files", func(t *testing.T) { + dir := t.TempDir() + paths := []string{filepath.Join(dir, "*.log")} + cfgStr := ` +scanner: + check_interval: 10ms +` + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + err := logp.DevelopmentSetup(logp.ToObserverOutput()) + require.NoError(t, err) + + fw := createWatcherWithConfig(t, paths, cfgStr) + go fw.Run(ctx) + + basename := "created.log" + filename := filepath.Join(dir, basename) + err = os.WriteFile(filename, nil, 0777) + require.NoError(t, err) + + t.Run("issues a warning in logs", func(t *testing.T) { + var lastWarning string + expLogMsg := fmt.Sprintf("file %q has no content yet, skipping", filename) + require.Eventually(t, func() bool { + logs := logp.ObserverLogs().FilterLevelExact(logp.WarnLevel.ZapLevel()).TakeAll() + if len(logs) == 0 { + return false + } + lastWarning = logs[len(logs)-1].Message + return strings.Contains(lastWarning, expLogMsg) + }, 100*time.Millisecond, 10*time.Millisecond, "expected a warning message %q but got %q", expLogMsg, lastWarning)
+ }) + + t.Run("emits a create event once something is written to the empty file", func(t *testing.T) { + err = os.WriteFile(filename, []byte("hello"), 0777) + require.NoError(t, err) + + e := fw.Event() + expEvent := loginp.FSEvent{ + NewPath: filename, + Op: loginp.OpCreate, + Descriptor: loginp.FileDescriptor{ + Filename: filename, + Info: testFileInfo{name: basename, size: 5}, // 5 bytes written + }, } - assert.ElementsMatch(t, paths, test.expectedFiles) + requireEqualEvents(t, expEvent, e) }) - } + }) + + t.Run("does not emit an event for a fingerprint collision", func(t *testing.T) { + dir := t.TempDir() + paths := []string{filepath.Join(dir, "*.log")} + cfgStr := ` +scanner: + check_interval: 10ms + fingerprint.enabled: true +` + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + fw := createWatcherWithConfig(t, paths, cfgStr) + go fw.Run(ctx) + + basename := "created.log" + filename := filepath.Join(dir, basename) + err := os.WriteFile(filename, []byte(strings.Repeat("a", 1024)), 0777) + require.NoError(t, err) + + e := fw.Event() + expEvent := loginp.FSEvent{ + NewPath: filename, + Op: loginp.OpCreate, + Descriptor: loginp.FileDescriptor{ + Filename: filename, + Fingerprint: "2edc986847e209b4016e141a6dc8716d3207350f416969382d431539bf292e4a", + Info: testFileInfo{name: basename, size: 1024}, + }, + } + requireEqualEvents(t, expEvent, e) + + // collisions are resolved in alphabetical order; the first filename wins + basename = "created_collision.log" + filename = filepath.Join(dir, basename) + err = os.WriteFile(filename, []byte(strings.Repeat("a", 1024)), 0777) + require.NoError(t, err) + + e = fw.Event() + // means no event + require.Equal(t, loginp.OpDone, e.Op) + }) } -func setupFilesForScannerTest(t *testing.T, tmpDir string) { - err := os.Mkdir(filepath.Join(tmpDir, directoryPath), 0750) - if err != nil { - t.Fatalf("cannot create non harvestable directory: %v", err) +func TestFileScanner(t *testing.T) { + dir := t.TempDir() + dir2 := t.TempDir() // for symlink testing + paths := []string{filepath.Join(dir, "*.log")} + + normalBasename := "normal.log" + undersizedBasename := "undersized.log" + excludedBasename := "excluded.log" + excludedIncludedBasename := "excluded_included.log" + travelerBasename := "traveler.log" + normalSymlinkBasename := "normal_symlink.log" + exclSymlinkBasename := "excl_symlink.log" + travelerSymlinkBasename := "portal.log" + + normalFilename := filepath.Join(dir, normalBasename) + undersizedFilename := filepath.Join(dir, undersizedBasename) + excludedFilename := filepath.Join(dir, excludedBasename) + excludedIncludedFilename := filepath.Join(dir, excludedIncludedBasename) + travelerFilename := filepath.Join(dir2, travelerBasename) + normalSymlinkFilename := filepath.Join(dir, normalSymlinkBasename) + exclSymlinkFilename := filepath.Join(dir, exclSymlinkBasename) + travelerSymlinkFilename := filepath.Join(dir, travelerSymlinkBasename) + + files := map[string]string{ + normalFilename: strings.Repeat("a", 1024), + undersizedFilename: strings.Repeat("a", 128), + excludedFilename: strings.Repeat("nothing to see here", 1024), + excludedIncludedFilename: strings.Repeat("perhaps something to see here", 1024), + travelerFilename: strings.Repeat("folks, I think I got lost", 1024), } - for _, path := range []string{excludedFileName, includedFileName} { - f, err := os.Create(filepath.Join(tmpDir, path)) - if err != nil { - t.Fatalf("file %s, error %v", path, err) - } - f.Close() + + sizes :=
make(map[string]int64, len(files)) + for filename, content := range files { + sizes[filename] = int64(len(content)) + } + for filename, content := range files { + err := os.WriteFile(filename, []byte(content), 0777) + require.NoError(t, err) } -} -func TestFileWatchNewDeleteModified(t *testing.T) { - oldTs := time.Now() - newTs := oldTs.Add(5 * time.Second) - testCases := map[string]struct { - prevFiles map[string]os.FileInfo - nextFiles map[string]os.FileInfo - expectedEvents []loginp.FSEvent + // this is to test that a symlink for a known file does not add the file twice + err := os.Symlink(normalFilename, normalSymlinkFilename) + require.NoError(t, err) + + // this is to test that a symlink for an unknown file is added once + err = os.Symlink(travelerFilename, travelerSymlinkFilename) + require.NoError(t, err) + + // this is to test that a symlink to an excluded file is not added + err = os.Symlink(exclSymlinkFilename, exclSymlinkFilename) + require.NoError(t, err) + + // this is to test that directories are handled and excluded + err = os.Mkdir(filepath.Join(dir, "dir"), 0777) + require.NoError(t, err) + + cases := []struct { + name string + cfgStr string + expDesc map[string]loginp.FileDescriptor }{ - "one new file": { - prevFiles: map[string]os.FileInfo{}, - nextFiles: map[string]os.FileInfo{ - "new_path": testFileInfo{"new_path", 5, oldTs, nil}, - }, - expectedEvents: []loginp.FSEvent{ - {Op: loginp.OpCreate, OldPath: "", NewPath: "new_path", Info: testFileInfo{"new_path", 5, oldTs, nil}}, + { + name: "returns all files when no limits, not including the repeated symlink", + cfgStr: ` +scanner: + symlinks: true + recursive_glob: true + fingerprint: + enabled: false + offset: 0 + length: 1024 +`, + expDesc: map[string]loginp.FileDescriptor{ + normalFilename: { + Filename: normalFilename, + Info: testFileInfo{ + size: sizes[normalFilename], + name: normalBasename, + }, + }, + undersizedFilename: { + Filename: undersizedFilename, + Info: testFileInfo{ + size: sizes[undersizedFilename], + name: undersizedBasename, + }, + }, + excludedFilename: { + Filename: excludedFilename, + Info: testFileInfo{ + size: sizes[excludedFilename], + name: excludedBasename, + }, + }, + excludedIncludedFilename: { + Filename: excludedIncludedFilename, + Info: testFileInfo{ + size: sizes[excludedIncludedFilename], + name: excludedIncludedBasename, + }, + }, + travelerSymlinkFilename: { + Filename: travelerSymlinkFilename, + Info: testFileInfo{ + size: sizes[travelerFilename], + name: travelerSymlinkBasename, + }, + }, }, }, - "one deleted file": { - prevFiles: map[string]os.FileInfo{ - "old_path": testFileInfo{"old_path", 5, oldTs, nil}, - }, - nextFiles: map[string]os.FileInfo{}, - expectedEvents: []loginp.FSEvent{ - {Op: loginp.OpDelete, OldPath: "old_path", NewPath: "", Info: testFileInfo{"old_path", 5, oldTs, nil}}, + { + name: "returns filtered files, excluding symlinks", + cfgStr: ` +scanner: + symlinks: false # symlinks are disabled + recursive_glob: false + fingerprint: + enabled: false + offset: 0 + length: 1024 +`, + expDesc: map[string]loginp.FileDescriptor{ + normalFilename: { + Filename: normalFilename, + Info: testFileInfo{ + size: sizes[normalFilename], + name: normalBasename, + }, + }, + undersizedFilename: { + Filename: undersizedFilename, + Info: testFileInfo{ + size: sizes[undersizedFilename], + name: undersizedBasename, + }, + }, + excludedFilename: { + Filename: excludedFilename, + Info: testFileInfo{ + size: sizes[excludedFilename], + name: excludedBasename, + }, + }, + 
excludedIncludedFilename: { + Filename: excludedIncludedFilename, + Info: testFileInfo{ + size: sizes[excludedIncludedFilename], + name: excludedIncludedBasename, + }, + }, }, }, - "one modified file": { - prevFiles: map[string]os.FileInfo{ - "path": testFileInfo{"path", 5, oldTs, nil}, - }, - nextFiles: map[string]os.FileInfo{ - "path": testFileInfo{"path", 10, newTs, nil}, - }, - expectedEvents: []loginp.FSEvent{ - {Op: loginp.OpWrite, OldPath: "path", NewPath: "path", Info: testFileInfo{"path", 10, newTs, nil}}, + { + name: "returns files according to excluded list", + cfgStr: ` +scanner: + exclude_files: ['.*exclude.*'] + symlinks: true + recursive_glob: true + fingerprint: + enabled: false + offset: 0 + length: 1024 +`, + expDesc: map[string]loginp.FileDescriptor{ + normalFilename: { + Filename: normalFilename, + Info: testFileInfo{ + size: sizes[normalFilename], + name: normalBasename, + }, + }, + undersizedFilename: { + Filename: undersizedFilename, + Info: testFileInfo{ + size: sizes[undersizedFilename], + name: undersizedBasename, + }, + }, + travelerSymlinkFilename: { + Filename: travelerSymlinkFilename, + Info: testFileInfo{ + size: sizes[travelerFilename], + name: travelerSymlinkBasename, + }, + }, }, }, - "two modified files": { - prevFiles: map[string]os.FileInfo{ - "path1": testFileInfo{"path1", 5, oldTs, nil}, - "path2": testFileInfo{"path2", 5, oldTs, nil}, - }, - nextFiles: map[string]os.FileInfo{ - "path1": testFileInfo{"path1", 10, newTs, nil}, - "path2": testFileInfo{"path2", 10, newTs, nil}, - }, - expectedEvents: []loginp.FSEvent{ - {Op: loginp.OpWrite, OldPath: "path1", NewPath: "path1", Info: testFileInfo{"path1", 10, newTs, nil}}, - {Op: loginp.OpWrite, OldPath: "path2", NewPath: "path2", Info: testFileInfo{"path2", 10, newTs, nil}}, + { + name: "returns no symlink if the original file is excluded", + cfgStr: ` +scanner: + exclude_files: ['.*exclude.*', '.*traveler.*'] + symlinks: true +`, + expDesc: map[string]loginp.FileDescriptor{ + normalFilename: { + Filename: normalFilename, + Info: testFileInfo{ + size: sizes[normalFilename], + name: normalBasename, + }, + }, + undersizedFilename: { + Filename: undersizedFilename, + Info: testFileInfo{ + size: sizes[undersizedFilename], + name: undersizedBasename, + }, + }, }, }, - "one modified file, one new file": { - prevFiles: map[string]os.FileInfo{ - "path1": testFileInfo{"path1", 5, oldTs, nil}, - }, - nextFiles: map[string]os.FileInfo{ - "path1": testFileInfo{"path1", 10, newTs, nil}, - "path2": testFileInfo{"path2", 10, newTs, nil}, + { + name: "returns files according to included list", + cfgStr: ` +scanner: + include_files: ['.*include.*'] + symlinks: true + recursive_glob: true + fingerprint: + enabled: false + offset: 0 + length: 1024 +`, + expDesc: map[string]loginp.FileDescriptor{ + excludedIncludedFilename: { + Filename: excludedIncludedFilename, + Info: testFileInfo{ + size: sizes[excludedIncludedFilename], + name: excludedIncludedBasename, + }, + }, }, - expectedEvents: []loginp.FSEvent{ - {Op: loginp.OpWrite, OldPath: "path1", NewPath: "path1", Info: testFileInfo{"path1", 10, newTs, nil}}, - {Op: loginp.OpCreate, OldPath: "", NewPath: "path2", Info: testFileInfo{"path2", 10, newTs, nil}}, + }, + { + name: "returns no included symlink if the original file is not included", + cfgStr: ` +scanner: + include_files: ['.*include.*', '.*portal.*'] + symlinks: true +`, + expDesc: map[string]loginp.FileDescriptor{ + excludedIncludedFilename: { + Filename: excludedIncludedFilename, + Info: testFileInfo{ + size: 
sizes[excludedIncludedFilename], + name: excludedIncludedBasename, + }, + }, }, }, - "one new file, one deleted file": { - prevFiles: map[string]os.FileInfo{ - "path_deleted": testFileInfo{"path_deleted", 5, oldTs, nil}, + { + name: "returns an included symlink if the original file is included", + cfgStr: ` +scanner: + include_files: ['.*include.*', '.*portal.*', '.*traveler.*'] + symlinks: true +`, + expDesc: map[string]loginp.FileDescriptor{ + excludedIncludedFilename: { + Filename: excludedIncludedFilename, + Info: testFileInfo{ + size: sizes[excludedIncludedFilename], + name: excludedIncludedBasename, + }, + }, + travelerSymlinkFilename: { + Filename: travelerSymlinkFilename, + Info: testFileInfo{ + size: sizes[travelerFilename], + name: travelerSymlinkBasename, + }, + }, }, - nextFiles: map[string]os.FileInfo{ - "path_new": testFileInfo{"path_new", 10, newTs, nil}, + }, + { + name: "returns all files except too small to fingerprint", + cfgStr: ` +scanner: + symlinks: true + recursive_glob: true + fingerprint: + enabled: true + offset: 0 + length: 1024 +`, + expDesc: map[string]loginp.FileDescriptor{ + normalFilename: { + Filename: normalFilename, + Fingerprint: "2edc986847e209b4016e141a6dc8716d3207350f416969382d431539bf292e4a", + Info: testFileInfo{ + size: sizes[normalFilename], + name: normalBasename, + }, + }, + excludedFilename: { + Filename: excludedFilename, + Fingerprint: "bd151321c3bbdb44185414a1b56b5649a00206dd4792e7230db8904e43987336", + Info: testFileInfo{ + size: sizes[excludedFilename], + name: excludedBasename, + }, + }, + excludedIncludedFilename: { + Filename: excludedIncludedFilename, + Fingerprint: "bfdb99a65297062658c26dfcea816d76065df2a2da2594bfd9b96e9e405da1c2", + Info: testFileInfo{ + size: sizes[excludedIncludedFilename], + name: excludedIncludedBasename, + }, + }, + travelerSymlinkFilename: { + Filename: travelerSymlinkFilename, + Fingerprint: "c4058942bffcea08810a072d5966dfa5c06eb79b902bf0011890dd8d22e1a5f8", + Info: testFileInfo{ + size: sizes[travelerFilename], + name: travelerSymlinkBasename, + }, + }, }, - expectedEvents: []loginp.FSEvent{ - {Op: loginp.OpDelete, OldPath: "path_deleted", NewPath: "", Info: testFileInfo{"path_deleted", 5, oldTs, nil}}, - {Op: loginp.OpCreate, OldPath: "", NewPath: "path_new", Info: testFileInfo{"path_new", 10, newTs, nil}}, + }, + { + name: "returns all files that match a non-standard fingerprint window", + cfgStr: ` +scanner: + symlinks: true + recursive_glob: true + fingerprint: + enabled: true + offset: 2 + length: 64 +`, + expDesc: map[string]loginp.FileDescriptor{ + normalFilename: { + Filename: normalFilename, + Fingerprint: "ffe054fe7ae0cb6dc65c3af9b61d5209f439851db43d0ba5997337df154668eb", + Info: testFileInfo{ + size: sizes[normalFilename], + name: normalBasename, + }, + }, + // undersizedFilename got excluded because of the matching fingerprint + excludedFilename: { + Filename: excludedFilename, + Fingerprint: "9c225a1e6a7df9c869499e923565b93937e88382bb9188145f117195cd41dcd1", + Info: testFileInfo{ + size: sizes[excludedFilename], + name: excludedBasename, + }, + }, + excludedIncludedFilename: { + Filename: excludedIncludedFilename, + Fingerprint: "7985b2b9750bdd3c76903db408aff3859204d6334279eaf516ecaeb618a218d5", + Info: testFileInfo{ + size: sizes[excludedIncludedFilename], + name: excludedIncludedBasename, + }, + }, + travelerSymlinkFilename: { + Filename: travelerSymlinkFilename, + Fingerprint: "da437600754a8eed6c194b7241b078679551c06c7dc89685a9a71be7829ad7e5", + Info: testFileInfo{ + size: 
sizes[travelerFilename], + name: travelerSymlinkBasename, + }, + }, }, }, } - for name, test := range testCases { - test := test + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + s := createScannerWithConfig(t, paths, tc.cfgStr) + requireEqualFiles(t, tc.expDesc, s.GetFiles()) + }) + } - t.Run(name, func(t *testing.T) { - w := fileWatcher{ - log: logp.L(), - prev: test.prevFiles, - scanner: &mockScanner{test.nextFiles}, - events: make(chan loginp.FSEvent), - sameFileFunc: testSameFile, - } + t.Run("returns error when creating scanner with a fingerprint too small", func(t *testing.T) { + cfgStr := ` +scanner: + fingerprint: + enabled: true + offset: 0 + length: 1 +` + cfg, err := conf.NewConfigWithYAML([]byte(cfgStr), cfgStr) + require.NoError(t, err) - go w.watch(context.Background()) + ns := &conf.Namespace{} + err = ns.Unpack(cfg) + require.NoError(t, err) - count := len(test.expectedEvents) - actual := make([]loginp.FSEvent, count) - for i := 0; i < count; i++ { - actual[i] = w.Event() - } + _, err = newFileWatcher(paths, ns) + require.Error(t, err) + require.Contains(t, err.Error(), "fingerprint size 1 bytes cannot be smaller than 64 bytes") + }) +} - assert.ElementsMatch(t, actual, test.expectedEvents) - }) +const benchmarkFileCount = 1000 + +func BenchmarkGetFiles(b *testing.B) { + dir := b.TempDir() + basenameFormat := "file-%d.log" + + for i := 0; i < benchmarkFileCount; i++ { + filename := filepath.Join(dir, fmt.Sprintf(basenameFormat, i)) + content := fmt.Sprintf("content-%d\n", i) + err := os.WriteFile(filename, []byte(strings.Repeat(content, 1024)), 0777) + require.NoError(b, err) } -} -func TestFileWatcherTruncate(t *testing.T) { - oldTs := time.Now() - newTs := oldTs.Add(time.Second) - testCases := map[string]struct { - prevFiles map[string]os.FileInfo - nextFiles map[string]os.FileInfo - expectedEvents []loginp.FSEvent - }{ - "truncated file, only size changes": { - prevFiles: map[string]os.FileInfo{ - "path": testFileInfo{"path", 42, oldTs, nil}, - }, - nextFiles: map[string]os.FileInfo{ - "path": testFileInfo{"path", 0, oldTs, nil}, - }, - expectedEvents: []loginp.FSEvent{ - {Op: loginp.OpTruncate, OldPath: "path", NewPath: "path", Info: testFileInfo{"path", 0, oldTs, nil}}, + s := fileScanner{ + paths: []string{filepath.Join(dir, "*.log")}, + cfg: fileScannerConfig{ + Fingerprint: fingerprintConfig{ + Enabled: false, }, }, - "truncated file, mod time and size changes": { - prevFiles: map[string]os.FileInfo{ - "path": testFileInfo{"path", 42, oldTs, nil}, - }, - nextFiles: map[string]os.FileInfo{ - "path": testFileInfo{"path", 0, newTs, nil}, - }, - expectedEvents: []loginp.FSEvent{ - {Op: loginp.OpTruncate, OldPath: "path", NewPath: "path", Info: testFileInfo{"path", 0, newTs, nil}}, - }, - }, - "no file change": { - prevFiles: map[string]os.FileInfo{ - "path": testFileInfo{"path", 42, oldTs, nil}, - }, - nextFiles: map[string]os.FileInfo{ - "path": testFileInfo{"path", 42, oldTs, nil}, + } + + for i := 0; i < b.N; i++ { + files := s.GetFiles() + require.Len(b, files, benchmarkFileCount) + } +} + +func BenchmarkGetFilesWithFingerprint(b *testing.B) { + dir := b.TempDir() + basenameFormat := "file-%d.log" + + for i := 0; i < benchmarkFileCount; i++ { + filename := filepath.Join(dir, fmt.Sprintf(basenameFormat, i)) + content := fmt.Sprintf("content-%d\n", i) + err := os.WriteFile(filename, []byte(strings.Repeat(content, 1024)), 0777) + require.NoError(b, err) + } + + s := fileScanner{ + paths: []string{filepath.Join(dir, "*.log")}, + cfg: 
fileScannerConfig{ + Fingerprint: fingerprintConfig{ + Enabled: true, + Offset: 0, + Length: 1024, }, - expectedEvents: []loginp.FSEvent{}, }, } - for name, test := range testCases { - t.Run(name, func(t *testing.T) { - w := fileWatcher{ - log: logp.L(), - prev: test.prevFiles, - scanner: &mockScanner{test.nextFiles}, - events: make(chan loginp.FSEvent, len(test.expectedEvents)), - sameFileFunc: testSameFile, - } + for i := 0; i < b.N; i++ { + files := s.GetFiles() + require.Len(b, files, benchmarkFileCount) + } +} - w.watch(context.Background()) - close(w.events) +func createWatcherWithConfig(t *testing.T, paths []string, cfgStr string) loginp.FSWatcher { + cfg, err := conf.NewConfigWithYAML([]byte(cfgStr), cfgStr) + require.NoError(t, err) - actual := []loginp.FSEvent{} - for evt := range w.events { - actual = append(actual, evt) - } + ns := &conf.Namespace{} + err = ns.Unpack(cfg) + require.NoError(t, err) - if len(actual) != len(test.expectedEvents) { - t.Fatalf("expecting %d elements, got %d", len(test.expectedEvents), len(actual)) - } - for i := range test.expectedEvents { - if test.expectedEvents[i] != actual[i] { - t.Errorf("element [%d] differ. Expecting:\n%#v\nGot:\n%#v\n", i, test.expectedEvents[i], actual[i]) - } - } - }) + fw, err := newFileWatcher(paths, ns) + require.NoError(t, err) + + return fw +} + +func createScannerWithConfig(t *testing.T, paths []string, cfgStr string) loginp.FSScanner { + cfg, err := conf.NewConfigWithYAML([]byte(cfgStr), cfgStr) + require.NoError(t, err) + + ns := &conf.Namespace{} + err = ns.Unpack(cfg) + require.NoError(t, err) + + config := defaultFileWatcherConfig() + err = ns.Config().Unpack(&config) + require.NoError(t, err) + scanner, err := newFileScanner(paths, config.Scanner) + require.NoError(t, err) + + return scanner +} + +func requireEqualFiles(t *testing.T, expected, actual map[string]loginp.FileDescriptor) { + t.Helper() + require.Equalf(t, len(expected), len(actual), "amount of files does not match:\n\nexpected \n%v\n\n actual \n%v\n", filenames(expected), filenames(actual)) + + for expFilename, expFD := range expected { + actFD, exists := actual[expFilename] + require.Truef(t, exists, "the actual file list is missing expected filename %s", expFilename) + requireEqualDescriptors(t, expFD, actFD) } } -type mockScanner struct { - files map[string]os.FileInfo +func requireEqualEvents(t *testing.T, expected, actual loginp.FSEvent) { + t.Helper() + require.Equal(t, expected.NewPath, actual.NewPath, "NewPath") + require.Equal(t, expected.OldPath, actual.OldPath, "OldPath") + require.Equal(t, expected.Op, actual.Op, "Op") + requireEqualDescriptors(t, expected.Descriptor, actual.Descriptor) } -func (m *mockScanner) GetFiles() map[string]os.FileInfo { - return m.files +func requireEqualDescriptors(t *testing.T, expected, actual loginp.FileDescriptor) { + t.Helper() + require.Equal(t, expected.Filename, actual.Filename, "Filename") + require.Equal(t, expected.Fingerprint, actual.Fingerprint, "Fingerprint") + require.Equal(t, expected.Info.Name(), actual.Info.Name(), "Info.Name()") + require.Equal(t, expected.Info.Size(), actual.Info.Size(), "Info.Size()") } -type testFileInfo struct { - path string - size int64 - time time.Time - sys interface{} +func filenames(m map[string]loginp.FileDescriptor) (result string) { + for filename := range m { + result += filename + "\n" + } + return result } -func (t testFileInfo) Name() string { return t.path } -func (t testFileInfo) Size() int64 { return t.size } -func (t testFileInfo) Mode() os.FileMode { 
return 0 } -func (t testFileInfo) ModTime() time.Time { return t.time } -func (t testFileInfo) IsDir() bool { return false } -func (t testFileInfo) Sys() interface{} { return t.sys } +func BenchmarkToFileDescriptor(b *testing.B) { + dir := b.TempDir() + basename := "created.log" + filename := filepath.Join(dir, basename) + err := os.WriteFile(filename, []byte(strings.Repeat("a", 1024)), 0777) + require.NoError(b, err) -func testSameFile(fi1, fi2 os.FileInfo) bool { - return fi1.Name() == fi2.Name() + s := fileScanner{ + paths: []string{filename}, + cfg: fileScannerConfig{ + Fingerprint: fingerprintConfig{ + Enabled: true, + Offset: 0, + Length: 1024, + }, + }, + } + + it, err := s.getIngestTarget(filename) + require.NoError(b, err) + + for i := 0; i < b.N; i++ { + fd, err := s.toFileDescriptor(&it) + require.NoError(b, err) + require.Equal(b, "2edc986847e209b4016e141a6dc8716d3207350f416969382d431539bf292e4a", fd.Fingerprint) + } } diff --git a/filebeat/input/filestream/identifier.go b/filebeat/input/filestream/identifier.go index 467ec64f23a..0cfeb031d63 100644 --- a/filebeat/input/filestream/identifier.go +++ b/filebeat/input/filestream/identifier.go @@ -19,7 +19,6 @@ package filestream import ( "fmt" - "os" loginp "github.com/elastic/beats/v7/filebeat/input/filestream/internal/input-logfile" "github.com/elastic/beats/v7/libbeat/common/file" @@ -36,6 +35,7 @@ const ( nativeName = "native" pathName = "path" inodeMarkerName = "inode_marker" + fingerprintName = "fingerprint" DefaultIdentifierName = nativeName identitySep = "::" @@ -45,6 +45,7 @@ var identifierFactories = map[string]identifierFactory{ nativeName: newINodeDeviceIdentifier, pathName: newPathIdentifier, inodeMarkerName: newINodeMarkerIdentifier, + fingerprintName: newFingerprintIdentifier, } type identifierFactory func(*conf.C) (fileIdentifier, error) @@ -58,19 +59,19 @@ type fileIdentifier interface { // fileSource implements the Source interface // It is required to identify and manage file sources. type fileSource struct { - info os.FileInfo + desc loginp.FileDescriptor newPath string oldPath string truncated bool archived bool - name string + fileID string identifierGenerator string } // Name returns the registry identifier of the file. func (f fileSource) Name() string { - return f.name + return f.fileID } // newFileIdentifier creates a new state identifier for a log input. 
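// For orientation, a sketch of the source names (registry keys) that the
// renamed fileID field carries for each identifier; the values below are
// illustrative, and the exact OS state string is platform-specific:
//
//   native:      "native::2081-35"                  (inode-device via file.GetOSState)
//   path:        "path::/var/log/app.log"
//   fingerprint: "fingerprint::2edc9868...bf292e4a" (hex-encoded SHA-256)
//
// The registry then prefixes this with the input, yielding keys like
// "filestream::<input-id>::native::2081-35", as shown in environment_test.go
// earlier in this diff.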
@@ -108,12 +109,12 @@ func newINodeDeviceIdentifier(_ *conf.C) (fileIdentifier, error) { func (i *inodeDeviceIdentifier) GetSource(e loginp.FSEvent) fileSource { return fileSource{ - info: e.Info, + desc: e.Descriptor, newPath: e.NewPath, oldPath: e.OldPath, truncated: e.Op == loginp.OpTruncate, archived: e.Op == loginp.OpArchived, - name: i.name + identitySep + file.GetOSState(e.Info).String(), + fileID: i.name + identitySep + file.GetOSState(e.Descriptor.Info).String(), identifierGenerator: i.name, } } @@ -147,12 +148,12 @@ func (p *pathIdentifier) GetSource(e loginp.FSEvent) fileSource { path = e.OldPath } return fileSource{ - info: e.Info, + desc: e.Descriptor, newPath: e.NewPath, oldPath: e.OldPath, truncated: e.Op == loginp.OpTruncate, archived: e.Op == loginp.OpArchived, - name: p.name + identitySep + path, + fileID: p.name + identitySep + path, identifierGenerator: p.name, } } @@ -179,7 +180,7 @@ func withSuffix(inner fileIdentifier, suffix string) fileIdentifier { func (s *suffixIdentifier) GetSource(e loginp.FSEvent) fileSource { fs := s.i.GetSource(e) - fs.name += "-" + s.suffix + fs.fileID += "-" + s.suffix return fs } diff --git a/filebeat/input/filestream/identifier_fingerprint.go b/filebeat/input/filestream/identifier_fingerprint.go new file mode 100644 index 00000000000..5fe524ebaf2 --- /dev/null +++ b/filebeat/input/filestream/identifier_fingerprint.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package filestream + +import ( + loginp "github.com/elastic/beats/v7/filebeat/input/filestream/internal/input-logfile" + conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" +) + +type fingerprintIdentifier struct { + log *logp.Logger +} + +func newFingerprintIdentifier(cfg *conf.C) (fileIdentifier, error) { + return &fingerprintIdentifier{ + log: logp.NewLogger("fingerprint_identifier"), + }, nil +} + +func (i *fingerprintIdentifier) GetSource(e loginp.FSEvent) fileSource { + return fileSource{ + desc: e.Descriptor, + newPath: e.NewPath, + oldPath: e.OldPath, + truncated: e.Op == loginp.OpTruncate, + archived: e.Op == loginp.OpArchived, + fileID: fingerprintName + identitySep + e.Descriptor.Fingerprint, + identifierGenerator: fingerprintName, + } +} + +func (i *fingerprintIdentifier) Name() string { + return fingerprintName +} + +func (i *fingerprintIdentifier) Supports(f identifierFeature) bool { + switch f { + case trackRename: + return true + default: + } + return false +} diff --git a/filebeat/input/filestream/identifier_inode_deviceid.go b/filebeat/input/filestream/identifier_inode_deviceid.go index d5ef0aa6c65..af6a5610086 100644 --- a/filebeat/input/filestream/identifier_inode_deviceid.go +++ b/filebeat/input/filestream/identifier_inode_deviceid.go @@ -93,14 +93,14 @@ func (i *inodeMarkerIdentifier) markerContents() string { } func (i *inodeMarkerIdentifier) GetSource(e loginp.FSEvent) fileSource { - osstate := file.GetOSState(e.Info) + osstate := file.GetOSState(e.Descriptor.Info) return fileSource{ - info: e.Info, + desc: e.Descriptor, newPath: e.NewPath, oldPath: e.OldPath, truncated: e.Op == loginp.OpTruncate, archived: e.Op == loginp.OpArchived, - name: i.name + identitySep + osstate.InodeString() + "-" + i.markerContents(), + fileID: i.name + identitySep + osstate.InodeString() + "-" + i.markerContents(), identifierGenerator: i.name, } } diff --git a/filebeat/input/filestream/identifier_test.go b/filebeat/input/filestream/identifier_test.go index 4007161116f..ca67ba375d6 100644 --- a/filebeat/input/filestream/identifier_test.go +++ b/filebeat/input/filestream/identifier_test.go @@ -52,8 +52,8 @@ func TestFileIdentifier(t *testing.T) { } src := identifier.GetSource(loginp.FSEvent{ - NewPath: tmpFile.Name(), - Info: fi, + NewPath: tmpFile.Name(), + Descriptor: loginp.FileDescriptor{Info: fi}, }) assert.Equal(t, identifier.Name()+"::"+file.GetOSState(fi).String(), src.Name()) @@ -76,8 +76,8 @@ func TestFileIdentifier(t *testing.T) { } src := identifier.GetSource(loginp.FSEvent{ - NewPath: tmpFile.Name(), - Info: fi, + NewPath: tmpFile.Name(), + Descriptor: loginp.FileDescriptor{Info: fi}, }) assert.Equal(t, identifier.Name()+"::"+file.GetOSState(fi).String()+"-my-suffix", src.Name()) @@ -129,4 +129,56 @@ func TestFileIdentifier(t *testing.T) { assert.Equal(t, test.expectedSrc, src.Name()) } }) + + t.Run("fingerprint identifier", func(t *testing.T) { + c := conf.MustNewConfigFrom(map[string]interface{}{ + "identifier": map[string]interface{}{ + "fingerprint": nil, + }, + }) + var cfg testFileIdentifierConfig + err := c.Unpack(&cfg) + require.NoError(t, err) + + identifier, err := newFileIdentifier(cfg.Identifier, "") + require.NoError(t, err) + assert.Equal(t, fingerprintName, identifier.Name()) + + testCases := []struct { + newPath string + oldPath string + operation loginp.Operation + desc loginp.FileDescriptor + expectedSrc string + }{ + { + newPath: "/path/to/file", + desc: loginp.FileDescriptor{Fingerprint: 
"fingerprintvalue"}, + expectedSrc: fingerprintName + "::fingerprintvalue", + }, + { + newPath: "/new/path/to/file", + oldPath: "/old/path/to/file", + operation: loginp.OpRename, + desc: loginp.FileDescriptor{Fingerprint: "fingerprintvalue"}, + expectedSrc: fingerprintName + "::fingerprintvalue", + }, + { + oldPath: "/old/path/to/file", + operation: loginp.OpDelete, + desc: loginp.FileDescriptor{Fingerprint: "fingerprintvalue"}, + expectedSrc: fingerprintName + "::fingerprintvalue", + }, + } + + for _, test := range testCases { + src := identifier.GetSource(loginp.FSEvent{ + NewPath: test.newPath, + OldPath: test.oldPath, + Op: test.operation, + Descriptor: test.desc, + }) + assert.Equal(t, test.expectedSrc, src.Name()) + } + }) } diff --git a/filebeat/input/filestream/input.go b/filebeat/input/filestream/input.go index 30fcb916ef5..b935161c126 100644 --- a/filebeat/input/filestream/input.go +++ b/filebeat/input/filestream/input.go @@ -228,7 +228,7 @@ func (inp *filestream) open(log *logp.Logger, canceler input.Canceler, fs fileSo r = readfile.NewStripNewline(r, inp.readerConfig.LineTerminator) - r = readfile.NewFilemeta(r, fs.newPath, offset) + r = readfile.NewFilemeta(r, fs.newPath, fs.desc.Info, fs.desc.Fingerprint, offset) r = inp.parsers.Create(r) diff --git a/filebeat/input/filestream/internal/input-logfile/clean.go b/filebeat/input/filestream/internal/input-logfile/clean.go index 14035f8de4d..a96f5529895 100644 --- a/filebeat/input/filestream/internal/input-logfile/clean.go +++ b/filebeat/input/filestream/internal/input-logfile/clean.go @@ -35,14 +35,15 @@ type cleaner struct { // run starts a loop that tries to clean entries from the registry. // The cleaner locks the store, such that no new states can be created -// during the cleanup phase. Only resources that are finished and whos TTL +// during the cleanup phase. Only resources that are finished and whose TTL // (clean_timeout setting) has expired will be removed. // // Resources are considered "Finished" if they do not have a current owner (active input), and // if they have no pending updates that still need to be written to the registry file after associated // events have been ACKed by the outputs. +// // The event acquisition timestamp is used as reference to clean resources. If a resources was blocked -// for a long time, and the life time has been exhausted, then the resource will be removed immediately +// for a long time, and the lifetime has been exhausted, then the resource will be removed immediately // once the last event has been ACKed. 
 func (c *cleaner) run(canceler unison.Canceler, store *store, interval time.Duration) {
 	started := time.Now()
@@ -51,7 +52,7 @@ func (c *cleaner) run(canceler unison.Canceler, store *store, interval time.Dura
 		return nil
 	})
 	if err != nil && !errors.Is(err, context.Canceled) {
-		c.log.Errorf("failed to start the registry cleaning routine: %s", err)
+		c.log.Errorw("failed running periodic registry cleaning routine", "error", err)
 	}
 }
diff --git a/filebeat/input/filestream/internal/input-logfile/fswatch.go b/filebeat/input/filestream/internal/input-logfile/fswatch.go
index 4f8fffe6741..dc00519c437 100644
--- a/filebeat/input/filestream/internal/input-logfile/fswatch.go
+++ b/filebeat/input/filestream/internal/input-logfile/fswatch.go
@@ -21,6 +21,8 @@ import (
 	"os"
 
 	"github.com/elastic/go-concert/unison"
+
+	file_helper "github.com/elastic/beats/v7/libbeat/common/file"
 )
 
 const (
@@ -54,6 +56,33 @@ func (o *Operation) String() string {
 	return name
 }
 
+// FileDescriptor represents full information about a file.
+type FileDescriptor struct {
+	// Filename is the original filename this descriptor was created from.
+	// In case it was a symlink, this will be the filename of the symlink, unlike
+	// the filename from the `Info`.
+	Filename string
+	// Info is the result of a file stat.
+	Info os.FileInfo
+	// Fingerprint is a computed hash of the file header.
+	Fingerprint string
+}
+
+// FileID returns a unique file ID.
+// If a fingerprint was computed, it is used as the ID.
+// Otherwise, a combination of the device ID and inode is used.
+func (fd FileDescriptor) FileID() string {
+	if fd.Fingerprint != "" {
+		return fd.Fingerprint
+	}
+	return file_helper.GetOSState(fd.Info).String()
+}
+
+// SameFile returns true if the descriptors point to the same file.
+func SameFile(a, b *FileDescriptor) bool {
+	return a.FileID() == b.FileID()
+}
+
 // FSEvent returns information about file system changes.
 type FSEvent struct {
 	// NewPath is the new path of the file.
@@ -63,16 +92,16 @@ type FSEvent struct {
 	OldPath string
 	// Op is the file system event: create, write, rename, remove
 	Op Operation
-	// Info describes the file in the event.
-	Info os.FileInfo
+	// Descriptor describes the file in the event.
+	Descriptor FileDescriptor
 }
 
 // FSScanner retrieves a list of files from the file system.
 type FSScanner interface {
 	// GetFiles returns the list of monitored files.
 	// The keys of the map are the paths to the files and
-	// the values are the FileInfos describing the file.
-	GetFiles() map[string]os.FileInfo
+	// the values are the file descriptors that contain all necessary information about the file.
+	GetFiles() map[string]FileDescriptor
 }
 
 // FSWatcher returns file events of the monitored files.
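
To make the identity rules of the new FileDescriptor concrete: FileID prefers a non-empty Fingerprint and falls back to OS state (device and inode) only without one, so any operation that preserves the file's header bytes also preserves its identity. A stand-alone sketch of that consequence, with stubbed types and made-up values (an illustration, not the Beats code):

package main

import "fmt"

// fileDescriptor mirrors the shape of the FileDescriptor above, with a
// plain string standing in for file_helper.GetOSState(Info).String().
type fileDescriptor struct {
	Filename    string
	Fingerprint string
	osState     string // stand-in for device+inode identity
}

// FileID prefers the fingerprint and falls back to the OS state.
func (fd fileDescriptor) FileID() string {
	if fd.Fingerprint != "" {
		return fd.Fingerprint
	}
	return fd.osState
}

func sameFile(a, b *fileDescriptor) bool {
	return a.FileID() == b.FileID()
}

func main() {
	// A copy-based rotation produces a file with a new inode but
	// identical header bytes, hence an identical fingerprint.
	before := fileDescriptor{Filename: "app.log", Fingerprint: "2edc9868…", osState: "100-200"}
	after := fileDescriptor{Filename: "app.log.1", Fingerprint: "2edc9868…", osState: "100-999"}
	fmt.Println(sameFile(&before, &after)) // true: fingerprint takes precedence

	// Without fingerprints the same rotation looks like two distinct files.
	before.Fingerprint, after.Fingerprint = "", ""
	fmt.Println(sameFile(&before, &after)) // false: falls back to device+inode
}

This is the property the fingerprint file identity relies on: rotation schemes that assign a new inode no longer detach the registry entry from the logical file, because the unchanged header keeps the ID stable.
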
diff --git a/filebeat/input/filestream/internal/input-logfile/input.go b/filebeat/input/filestream/internal/input-logfile/input.go index afb68f9ba8b..db3e713fbdd 100644 --- a/filebeat/input/filestream/internal/input-logfile/input.go +++ b/filebeat/input/filestream/internal/input-logfile/input.go @@ -30,6 +30,7 @@ import ( type managedInput struct { userID string + metricsID string manager *InputManager ackCH *updateChan sourceIdentifier *sourceIdentifier @@ -61,7 +62,7 @@ func (inp *managedInput) Run( defer cancel() ctx.Cancelation = cancelCtx - metrics := NewMetrics(ctx.ID) + metrics := NewMetrics(inp.metricsID) defer metrics.Close() hg := &defaultHarvesterGroup{ diff --git a/filebeat/input/filestream/internal/input-logfile/manager.go b/filebeat/input/filestream/internal/input-logfile/manager.go index 0e1895f8b83..c95efd1c5a1 100644 --- a/filebeat/input/filestream/internal/input-logfile/manager.go +++ b/filebeat/input/filestream/internal/input-logfile/manager.go @@ -177,10 +177,13 @@ func (cim *InputManager) Create(config *conf.C) (v2.Input, error) { " duplication, please add an ID and restart Filebeat") } + metricsID := settings.ID cim.idsMux.Lock() if _, exists := cim.ids[settings.ID]; exists { cim.Logger.Errorf("filestream input with ID '%s' already exists, this "+ - "will lead to data duplication, please use a different ID", settings.ID) + "will lead to data duplication, please use a different ID. Metrics "+ + "collection has been disabled on this input.", settings.ID) + metricsID = "" } // TODO: improve how inputs with empty IDs are tracked. @@ -223,6 +226,7 @@ func (cim *InputManager) Create(config *conf.C) (v2.Input, error) { manager: cim, ackCH: cim.ackCH, userID: settings.ID, + metricsID: metricsID, prospector: prospector, harvester: harvester, sourceIdentifier: sourceIdentifier, diff --git a/filebeat/input/filestream/legacy_metrics_integration_test.go b/filebeat/input/filestream/legacy_metrics_integration_test.go new file mode 100644 index 00000000000..649ede41f3e --- /dev/null +++ b/filebeat/input/filestream/legacy_metrics_integration_test.go @@ -0,0 +1,250 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build integration + +package filestream + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/tests/integration" +) + +var fconfig = ` +filebeat.inputs: + - type: filestream + id: my-filestream-id + enabled: true + close.reader.after_interval: 1s + prospector.scanner.check_interval: 500ms + paths: + - %s/*.filestream + - type: log + id: my-log-input + enabled: true + close_timeout: 1s + scan_frequency: 500ms + paths: + - %s/*.log + +output.console: + codec.json: + pretty: true + +logging: + level: debug + selectors: "*" + +http: + enabled: true +` + +func TestLegacyMetrics(t *testing.T) { + filebeat := integration.NewBeat(t, "filebeat", "../../filebeat.test") + + cfg := fmt.Sprintf(fconfig, filebeat.TempDir(), filebeat.TempDir()) + + filebeat.WriteConfigFile(cfg) + filebeat.Start() + + filebeat.WaitForLogs("Metrics endpoint listening on:", 10*time.Second) + + // After starting Filebeat all counters must be zero + waitForMetrics(t, + LegacyHarvesterMetrics{ + OpenFiles: 0, + Closed: 0, + Running: 0, + Started: 0, + }) + + filestreamLogFile := filepath.Join(filebeat.TempDir(), "01.filestream") + filestreamLog, err := os.Create(filestreamLogFile) + if err != nil { + t.Fatalf("could not create log file '%s': %s", filestreamLogFile, err) + } + + // Write a line in the file harvested by Filestream + fmt.Fprintln(filestreamLog, "first line") + + waitForMetrics(t, + LegacyHarvesterMetrics{ + OpenFiles: 1, + Running: 1, + Started: 1, + Closed: 0, + }, + "Filestream input did not start the harvester") + + // Wait for the harvester to close the file + waitForMetrics(t, + LegacyHarvesterMetrics{ + OpenFiles: 0, + Running: 0, + Started: 1, + Closed: 1, + }, + "Filestream input did not close the harvester") + + // Write a line in the file harvested by the log input + logInputLogFileName := filepath.Join(filebeat.TempDir(), "01.log") + logInputLog, err := os.Create(logInputLogFileName) + if err != nil { + t.Fatalf("could not create log file '%s': %s", logInputLogFileName, err) + } + + fmt.Fprintln(logInputLog, "first line") + + waitForMetrics(t, + LegacyHarvesterMetrics{ + OpenFiles: 1, + Running: 1, + Started: 2, + Closed: 1, + }, + "Log input did not start harvester") + + // Wait for the log input to close the file + waitForMetrics(t, + LegacyHarvesterMetrics{ + OpenFiles: 0, + Running: 0, + Started: 2, + Closed: 2, + }, + "Log input did not close the harvester") + + // Writes one more line to each log file + fmt.Fprintln(logInputLog, "second line") + fmt.Fprintln(filestreamLog, "second line") + + // Both harvesters should be running + waitForMetrics(t, + LegacyHarvesterMetrics{ + OpenFiles: 2, + Running: 2, + Started: 4, + Closed: 2, + }, + "Two harvesters should be running") + + // Wait for both harvesters to close the file + waitForMetrics(t, + LegacyHarvesterMetrics{ + OpenFiles: 0, + Running: 0, + Started: 4, + Closed: 4, + }, + "All harvesters must be closed") +} + +func waitForMetrics(t *testing.T, expect LegacyHarvesterMetrics, msgAndArgs ...any) { + t.Helper() + got := LegacyHarvesterMetrics{} + assert.Eventually(t, func() bool { + got = getHarvesterMetrics(t) + return expect == got + }, 10*time.Second, 100*time.Millisecond, msgAndArgs...) 
+ + if !t.Failed() { + return + } + + if expect.Closed != got.Closed { + t.Logf("expecting 'closed' to be %d, got %d instead", expect.Closed, got.Closed) + } + + if expect.OpenFiles != got.OpenFiles { + t.Logf("expecting 'open_files' to be %d, got %d instead", expect.OpenFiles, got.OpenFiles) + } + + if expect.Running != got.Running { + t.Logf("expecting 'running' to be %d, got %d instead", expect.Running, got.Running) + } + + if expect.Started != got.Started { + t.Logf("expecting 'started' to be %d, got %d instead", expect.Started, got.Started) + } +} + +func compareMetrics(t *testing.T, expect, got LegacyHarvesterMetrics) { + t.Helper() + + if expect.Closed != got.Closed { + t.Errorf("expecting 'closed' to be %d, got %d instead", expect.Closed, got.Closed) + } + + if expect.OpenFiles != got.OpenFiles { + t.Errorf("expecting 'open_files' to be %d, got %d instead", expect.OpenFiles, got.OpenFiles) + } + + if expect.Running != got.Running { + t.Errorf("expecting 'running' to be %d, got %d instead", expect.Running, got.Running) + } + + if expect.Started != got.Started { + t.Errorf("expecting 'started' to be %d, got %d instead", expect.Started, got.Started) + } +} + +type LegacyHarvesterMetrics struct { + Closed int `json:"closed"` + OpenFiles int `json:"open_files"` + Running int `json:"running"` + Started int `json:"started"` +} + +func getHarvesterMetrics(t *testing.T) LegacyHarvesterMetrics { + // The host is ignored because we're connecting via Unix sockets. + resp, err := http.Get("http://localhost:5066/stats") + if err != nil { + t.Fatalf("could not execute HTTP call: %s", err) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("could not read request body: %s", err) + } + + type foo struct { + F struct { + H LegacyHarvesterMetrics `json:"harvester"` + } `json:"filebeat"` + } + + m := struct { + F struct { + H LegacyHarvesterMetrics `json:"harvester"` + } `json:"filebeat"` + }{} + if err := json.Unmarshal(body, &m); err != nil { + t.Fatalf("could not unmarshal request body: %s", err) + } + + return m.F.H +} diff --git a/filebeat/input/filestream/logger.go b/filebeat/input/filestream/logger.go index 7963f11308d..7b644fd0d87 100644 --- a/filebeat/input/filestream/logger.go +++ b/filebeat/input/filestream/logger.go @@ -28,8 +28,11 @@ func loggerWithEvent(logger *logp.Logger, event loginp.FSEvent, src loginp.Sourc "operation", event.Op.String(), "source_name", src.Name(), ) - if event.Info != nil && event.Info.Sys() != nil { - log = log.With("os_id", file.GetOSState(event.Info)) + if event.Descriptor.Fingerprint != "" { + log = log.With("fingerprint", event.Descriptor.Fingerprint) + } + if event.Descriptor.Info != nil && event.Descriptor.Info.Sys() != nil { + log = log.With("os_id", file.GetOSState(event.Descriptor.Info)) } if event.NewPath != "" { log = log.With("new_path", event.NewPath) diff --git a/filebeat/input/filestream/prospector.go b/filebeat/input/filestream/prospector.go index 1e4a9c91c7f..336461fede5 100644 --- a/filebeat/input/filestream/prospector.go +++ b/filebeat/input/filestream/prospector.go @@ -78,12 +78,12 @@ func (p *fileProspector) Init( return "", nil } - fi, ok := files[fm.Source] + fd, ok := files[fm.Source] if !ok { return "", fm } - newKey := newID(p.identifier.GetSource(loginp.FSEvent{NewPath: fm.Source, Info: fi})) + newKey := newID(p.identifier.GetSource(loginp.FSEvent{NewPath: fm.Source, Descriptor: fd})) return newKey, fm }) @@ -109,13 +109,13 @@ func (p *fileProspector) Init( return "", nil } - fi, ok := files[fm.Source] + fd, ok := 
files[fm.Source] if !ok { return "", fm } if fm.IdentifierName != identifierName { - newKey := p.identifier.GetSource(loginp.FSEvent{NewPath: fm.Source, Info: fi}).Name() + newKey := p.identifier.GetSource(loginp.FSEvent{NewPath: fm.Source, Descriptor: fd}).Name() fm.IdentifierName = identifierName return newKey, fm } @@ -188,7 +188,7 @@ func (p *fileProspector) onFSEvent( } if p.isFileIgnored(log, event, ignoreSince) { - err := updater.ResetCursor(src, state{Offset: event.Info.Size()}) + err := updater.ResetCursor(src, state{Offset: event.Descriptor.Info.Size()}) if err != nil { log.Errorf("setting cursor for ignored file: %v", err) } @@ -224,12 +224,12 @@ func (p *fileProspector) onFSEvent( func (p *fileProspector) isFileIgnored(log *logp.Logger, fe loginp.FSEvent, ignoreInactiveSince time.Time) bool { if p.ignoreOlder > 0 { now := time.Now() - if now.Sub(fe.Info.ModTime()) > p.ignoreOlder { + if now.Sub(fe.Descriptor.Info.ModTime()) > p.ignoreOlder { log.Debugf("Ignore file because ignore_older reached. File %s", fe.NewPath) return true } } - if !ignoreInactiveSince.IsZero() && fe.Info.ModTime().Sub(ignoreInactiveSince) <= 0 { + if !ignoreInactiveSince.IsZero() && fe.Descriptor.Info.ModTime().Sub(ignoreInactiveSince) <= 0 { log.Debugf("Ignore file because ignore_since.* reached time %v. File %s", p.ignoreInactiveSince, fe.NewPath) return true } diff --git a/filebeat/input/filestream/prospector_creator.go b/filebeat/input/filestream/prospector_creator.go index 75a5e8dc3aa..5142704a614 100644 --- a/filebeat/input/filestream/prospector_creator.go +++ b/filebeat/input/filestream/prospector_creator.go @@ -24,6 +24,8 @@ import ( loginp "github.com/elastic/beats/v7/filebeat/input/filestream/internal/input-logfile" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" + conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" ) const ( @@ -36,16 +38,25 @@ const ( var experimentalWarning sync.Once func newProspector(config config) (loginp.Prospector, error) { + err := checkConfigCompatibility(config.FileWatcher, config.FileIdentity) + if err != nil { + return nil, err + } + filewatcher, err := newFileWatcher(config.Paths, config.FileWatcher) if err != nil { return nil, fmt.Errorf("error while creating filewatcher %w", err) } - identifier, err := newFileIdentifier(config.FileIdentity, getIdentifierSuffix(config)) + identifier, err := newFileIdentifier(config.FileIdentity, config.Reader.Parsers.Suffix) if err != nil { return nil, fmt.Errorf("error while creating file identifier: %w", err) } + logp.L(). + With("filestream_id", config.ID). 
+ Debugf("file identity is set to %s", identifier.Name()) + fileprospector := fileProspector{ filewatcher: filewatcher, identifier: identifier, @@ -104,6 +115,22 @@ func newProspector(config config) (loginp.Prospector, error) { return nil, fmt.Errorf("no such rotation method: %s", rotationMethod) } -func getIdentifierSuffix(config config) string { - return config.Reader.Parsers.Suffix +func checkConfigCompatibility(fileWatcher, fileIdentifier *conf.Namespace) error { + var fwCfg struct { + Fingerprint struct { + Enabled bool `config:"enabled"` + } `config:"fingerprint"` + } + + if fileWatcher != nil && fileIdentifier != nil && fileIdentifier.Name() == fingerprintName { + err := fileWatcher.Config().Unpack(&fwCfg) + if err != nil { + return fmt.Errorf("failed to parse file watcher configuration: %w", err) + } + if !fwCfg.Fingerprint.Enabled { + return fmt.Errorf("fingerprint file identity can be used only when fingerprint is enabled in the scanner") + } + } + + return nil } diff --git a/filebeat/input/filestream/prospector_creator_test.go b/filebeat/input/filestream/prospector_creator_test.go index bb87cc7118d..c49488ffd9c 100644 --- a/filebeat/input/filestream/prospector_creator_test.go +++ b/filebeat/input/filestream/prospector_creator_test.go @@ -21,31 +21,95 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + conf "github.com/elastic/elastic-agent-libs/config" ) -func TestCreateProspector_SetIgnoreInactiveSince(t *testing.T) { - testCases := map[string]struct { - ignore_inactive_since string - }{ - "ignore_inactive_since set to since_last_start": { - ignore_inactive_since: "since_last_start", - }, - "ignore_inactive_since set to since_first_start": { - ignore_inactive_since: "since_first_start", - }, - "ignore_inactive_since not set": { - ignore_inactive_since: "", - }, - } - for name, test := range testCases { - test := test - t.Run(name, func(t *testing.T) { - c := config{ - IgnoreInactive: ignoreInactiveSettings[test.ignore_inactive_since], - } - p, _ := newProspector(c) - fileProspector := p.(*fileProspector) - assert.Equal(t, fileProspector.ignoreInactiveSince, ignoreInactiveSettings[test.ignore_inactive_since]) - }) - } +func TestCreateProspector(t *testing.T) { + t.Run("SetIgnoreInactiveSince", func(t *testing.T) { + testCases := map[string]struct { + ignore_inactive_since string + }{ + "ignore_inactive_since set to since_last_start": { + ignore_inactive_since: "since_last_start", + }, + "ignore_inactive_since set to since_first_start": { + ignore_inactive_since: "since_first_start", + }, + "ignore_inactive_since not set": { + ignore_inactive_since: "", + }, + } + for name, test := range testCases { + test := test + t.Run(name, func(t *testing.T) { + c := config{ + IgnoreInactive: ignoreInactiveSettings[test.ignore_inactive_since], + } + p, _ := newProspector(c) + fileProspector := p.(*fileProspector) + assert.Equal(t, fileProspector.ignoreInactiveSince, ignoreInactiveSettings[test.ignore_inactive_since]) + }) + } + }) + t.Run("file watcher and file identity compatibility", func(t *testing.T) { + cases := []struct { + name string + cfgStr string + err string + }{ + { + name: "returns no error for a fully default config", + cfgStr: ` +paths: ['some'] +`, + }, + { + name: "returns no error when fingerprint and identity is configured", + cfgStr: ` +paths: ['some'] +file_identity.fingerprint: ~ +prospector.scanner.fingerprint.enabled: true +`, + }, + { + name: "returns no error when fingerprint and other identity is configured", + 
cfgStr: ` +paths: ['some'] +file_identity.path: ~ +prospector.scanner.fingerprint.enabled: true +`, + }, + { + name: "returns error when fingerprint is disabled but fingerprint identity is configured", + cfgStr: ` +paths: ['some'] +file_identity.fingerprint: ~ +prospector.scanner.fingerprint.enabled: false +`, + err: "fingerprint file identity can be used only when fingerprint is enabled in the scanner", + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + c, err := conf.NewConfigWithYAML([]byte(tc.cfgStr), tc.cfgStr) + require.NoError(t, err) + + cfg := defaultConfig() + err = c.Unpack(&cfg) + require.NoError(t, err) + + _, err = newProspector(cfg) + if tc.err == "" { + require.NoError(t, err) + return + } + + require.Error(t, err) + require.Contains(t, err.Error(), tc.err) + }) + } + }) } diff --git a/filebeat/input/filestream/prospector_test.go b/filebeat/input/filestream/prospector_test.go index c3860856e58..834784c81da 100644 --- a/filebeat/input/filestream/prospector_test.go +++ b/filebeat/input/filestream/prospector_test.go @@ -21,6 +21,7 @@ package filestream import ( "context" "fmt" + "io/fs" "io/ioutil" "os" "sync" @@ -39,7 +40,7 @@ import ( func TestProspector_InitCleanIfRemoved(t *testing.T) { testCases := map[string]struct { entries map[string]loginp.Value - filesOnDisk map[string]os.FileInfo + filesOnDisk map[string]loginp.FileDescriptor cleanRemoved bool expectedCleanedKeys []string }{ @@ -108,7 +109,7 @@ func TestProspector_InitUpdateIdentifiers(t *testing.T) { testCases := map[string]struct { entries map[string]loginp.Value - filesOnDisk map[string]os.FileInfo + filesOnDisk map[string]loginp.FileDescriptor expectedUpdatedKeys map[string]string }{ "prospector init does not update keys if there are no entries": { @@ -137,8 +138,8 @@ func TestProspector_InitUpdateIdentifiers(t *testing.T) { }, }, }, - filesOnDisk: map[string]os.FileInfo{ - tmpFileName: fi, + filesOnDisk: map[string]loginp.FileDescriptor{ + tmpFileName: {Info: fi}, }, expectedUpdatedKeys: map[string]string{"not_path::key1": "path::" + tmpFileName}, }, @@ -170,8 +171,8 @@ func TestProspectorNewAndUpdatedFiles(t *testing.T) { }{ "two new files": { events: []loginp.FSEvent{ - {Op: loginp.OpCreate, NewPath: "/path/to/file", Info: testFileInfo{}}, - {Op: loginp.OpCreate, NewPath: "/path/to/other/file", Info: testFileInfo{}}, + {Op: loginp.OpCreate, NewPath: "/path/to/file", Descriptor: createTestFileDescriptor()}, + {Op: loginp.OpCreate, NewPath: "/path/to/other/file", Descriptor: createTestFileDescriptor()}, }, expectedEvents: []harvesterEvent{ harvesterStart("path::/path/to/file"), @@ -181,7 +182,7 @@ func TestProspectorNewAndUpdatedFiles(t *testing.T) { }, "one updated file": { events: []loginp.FSEvent{ - {Op: loginp.OpWrite, NewPath: "/path/to/file", Info: testFileInfo{}}, + {Op: loginp.OpWrite, NewPath: "/path/to/file", Descriptor: createTestFileDescriptor()}, }, expectedEvents: []harvesterEvent{ harvesterStart("path::/path/to/file"), @@ -190,8 +191,8 @@ func TestProspectorNewAndUpdatedFiles(t *testing.T) { }, "one updated then truncated file": { events: []loginp.FSEvent{ - {Op: loginp.OpWrite, NewPath: "/path/to/file", Info: testFileInfo{}}, - {Op: loginp.OpTruncate, NewPath: "/path/to/file", Info: testFileInfo{}}, + {Op: loginp.OpWrite, NewPath: "/path/to/file", Descriptor: createTestFileDescriptor()}, + {Op: loginp.OpTruncate, NewPath: "/path/to/file", Descriptor: createTestFileDescriptor()}, }, expectedEvents: []harvesterEvent{ harvesterStart("path::/path/to/file"), @@ -202,14 
+203,14 @@ func TestProspectorNewAndUpdatedFiles(t *testing.T) { "old files with ignore older configured": { events: []loginp.FSEvent{ { - Op: loginp.OpCreate, - NewPath: "/path/to/file", - Info: testFileInfo{"/path/to/file", 5, minuteAgo, nil}, + Op: loginp.OpCreate, + NewPath: "/path/to/file", + Descriptor: createTestFileDescriptorWithInfo(testFileInfo{"/path/to/file", 5, minuteAgo, nil}), }, { - Op: loginp.OpWrite, - NewPath: "/path/to/other/file", - Info: testFileInfo{"/path/to/other/file", 5, minuteAgo, nil}, + Op: loginp.OpWrite, + NewPath: "/path/to/other/file", + Descriptor: createTestFileDescriptorWithInfo(testFileInfo{"/path/to/other/file", 5, minuteAgo, nil}), }, }, ignoreOlder: 10 * time.Second, @@ -220,14 +221,14 @@ func TestProspectorNewAndUpdatedFiles(t *testing.T) { "newer files with ignore older": { events: []loginp.FSEvent{ { - Op: loginp.OpCreate, - NewPath: "/path/to/file", - Info: testFileInfo{"/path/to/file", 5, minuteAgo, nil}, + Op: loginp.OpCreate, + NewPath: "/path/to/file", + Descriptor: createTestFileDescriptorWithInfo(testFileInfo{"/path/to/file", 5, minuteAgo, nil}), }, { - Op: loginp.OpWrite, - NewPath: "/path/to/other/file", - Info: testFileInfo{"/path/to/other/file", 5, minuteAgo, nil}, + Op: loginp.OpWrite, + NewPath: "/path/to/other/file", + Descriptor: createTestFileDescriptorWithInfo(testFileInfo{"/path/to/other/file", 5, minuteAgo, nil}), }, }, ignoreOlder: 5 * time.Minute, @@ -265,14 +266,14 @@ func TestProspectorHarvesterUpdateIgnoredFiles(t *testing.T) { minuteAgo := time.Now().Add(-1 * time.Minute) eventCreate := loginp.FSEvent{ - Op: loginp.OpCreate, - NewPath: "/path/to/file", - Info: testFileInfo{"/path/to/file", 5, minuteAgo, nil}, + Op: loginp.OpCreate, + NewPath: "/path/to/file", + Descriptor: createTestFileDescriptorWithInfo(testFileInfo{"/path/to/file", 5, minuteAgo, nil}), } eventUpdated := loginp.FSEvent{ - Op: loginp.OpWrite, - NewPath: "/path/to/file", - Info: testFileInfo{"/path/to/file", 10, time.Now(), nil}, + Op: loginp.OpWrite, + NewPath: "/path/to/file", + Descriptor: createTestFileDescriptorWithInfo(testFileInfo{"/path/to/file", 10, time.Now(), nil}), } expectedEvents := []harvesterEvent{ harvesterStart("path::/path/to/file"), @@ -328,13 +329,13 @@ func TestProspectorDeletedFile(t *testing.T) { }{ "one deleted file without clean removed": { events: []loginp.FSEvent{ - {Op: loginp.OpDelete, OldPath: "/path/to/file", Info: testFileInfo{}}, + {Op: loginp.OpDelete, OldPath: "/path/to/file", Descriptor: createTestFileDescriptor()}, }, cleanRemoved: false, }, "one deleted file with clean removed": { events: []loginp.FSEvent{ - {Op: loginp.OpDelete, OldPath: "/path/to/file", Info: testFileInfo{}}, + {Op: loginp.OpDelete, OldPath: "/path/to/file", Descriptor: createTestFileDescriptor()}, }, cleanRemoved: true, }, @@ -377,10 +378,10 @@ func TestProspectorRenamedFile(t *testing.T) { "one renamed file without rename tracker": { events: []loginp.FSEvent{ { - Op: loginp.OpRename, - OldPath: "/old/path/to/file", - NewPath: "/new/path/to/file", - Info: testFileInfo{}, + Op: loginp.OpRename, + OldPath: "/old/path/to/file", + NewPath: "/new/path/to/file", + Descriptor: createTestFileDescriptor(), }, }, expectedEvents: []harvesterEvent{ @@ -392,10 +393,10 @@ func TestProspectorRenamedFile(t *testing.T) { "one renamed file with rename tracker": { events: []loginp.FSEvent{ { - Op: loginp.OpRename, - OldPath: "/old/path/to/file", - NewPath: "/new/path/to/file", - Info: testFileInfo{}, + Op: loginp.OpRename, + OldPath: "/old/path/to/file", + NewPath: 
"/new/path/to/file", + Descriptor: createTestFileDescriptor(), }, }, trackRename: true, @@ -406,10 +407,10 @@ func TestProspectorRenamedFile(t *testing.T) { "one renamed file with rename tracker with close renamed": { events: []loginp.FSEvent{ { - Op: loginp.OpRename, - OldPath: "/old/path/to/file", - NewPath: "/new/path/to/file", - Info: testFileInfo{}, + Op: loginp.OpRename, + OldPath: "/old/path/to/file", + NewPath: "/new/path/to/file", + Descriptor: createTestFileDescriptor(), }, }, trackRename: true, @@ -503,7 +504,7 @@ func (t *testHarvesterGroup) StopHarvesters() error { type mockFileWatcher struct { events []loginp.FSEvent - filesOnDisk map[string]os.FileInfo + filesOnDisk map[string]loginp.FileDescriptor outputCount, eventCount int @@ -523,7 +524,7 @@ func newMockFileWatcher(events []loginp.FSEvent, eventCount int) *mockFileWatche // newMockFileWatcherWithFiles creates an FSWatch mock to // get the required file information from the file system using // the GetFiles function. -func newMockFileWatcherWithFiles(filesOnDisk map[string]os.FileInfo) *mockFileWatcher { +func newMockFileWatcherWithFiles(filesOnDisk map[string]loginp.FileDescriptor) *mockFileWatcher { return &mockFileWatcher{ filesOnDisk: filesOnDisk, out: make(chan loginp.FSEvent), @@ -542,7 +543,7 @@ func (m *mockFileWatcher) Event() loginp.FSEvent { func (m *mockFileWatcher) Run(_ unison.Canceler) {} -func (m *mockFileWatcher) GetFiles() map[string]os.FileInfo { return m.filesOnDisk } +func (m *mockFileWatcher) GetFiles() map[string]loginp.FileDescriptor { return m.filesOnDisk } type mockMetadataUpdater struct { table map[string]interface{} @@ -668,10 +669,10 @@ func TestOnRenameFileIdentity(t *testing.T) { populateStore: true, events: []loginp.FSEvent{ { - Op: loginp.OpRename, - OldPath: "/old/path/to/file", - NewPath: "/new/path/to/file", - Info: testFileInfo{}, + Op: loginp.OpRename, + OldPath: "/old/path/to/file", + NewPath: "/new/path/to/file", + Descriptor: createTestFileDescriptor(), }, }, }, @@ -681,10 +682,10 @@ func TestOnRenameFileIdentity(t *testing.T) { populateStore: false, events: []loginp.FSEvent{ { - Op: loginp.OpRename, - OldPath: "/old/path/to/file", - NewPath: "/new/path/to/file", - Info: testFileInfo{}, + Op: loginp.OpRename, + OldPath: "/old/path/to/file", + NewPath: "/new/path/to/file", + Descriptor: createTestFileDescriptor(), }, }, }, @@ -721,3 +722,29 @@ func TestOnRenameFileIdentity(t *testing.T) { }) } } + +type testFileInfo struct { + name string + size int64 + time time.Time + sys interface{} +} + +func (t testFileInfo) Name() string { return t.name } +func (t testFileInfo) Size() int64 { return t.size } +func (t testFileInfo) Mode() os.FileMode { return 0 } +func (t testFileInfo) ModTime() time.Time { return t.time } +func (t testFileInfo) IsDir() bool { return false } +func (t testFileInfo) Sys() interface{} { return t.sys } + +func createTestFileDescriptor() loginp.FileDescriptor { + return createTestFileDescriptorWithInfo(testFileInfo{}) +} + +func createTestFileDescriptorWithInfo(fi fs.FileInfo) loginp.FileDescriptor { + return loginp.FileDescriptor{ + Info: fi, + Fingerprint: "fingerprint", + Filename: "filename", + } +} diff --git a/filebeat/input/journald/config.go b/filebeat/input/journald/config.go index 60f5881341b..5f18cbce211 100644 --- a/filebeat/input/journald/config.go +++ b/filebeat/input/journald/config.go @@ -48,6 +48,10 @@ type config struct { // MaxBackoff is the limit of the backoff time. 
MaxBackoff time.Duration `config:"max_backoff" validate:"min=0,nonzero"` + // Since is the relative time offset from now to provide journal + // entries from. If Since is nil, no offset is applied. + Since *time.Duration `config:"since"` + // Seek is the method to read from journals. Seek journalread.SeekMode `config:"seek"` @@ -100,7 +104,11 @@ func (im *bwcIncludeMatches) Unpack(c *ucfg.Config) error { return c.Unpack((*journalfield.IncludeMatches)(im)) } -var errInvalidSeekFallback = errors.New("invalid setting for cursor_seek_fallback") +var ( + errInvalidSeekFallback = errors.New("invalid setting for cursor_seek_fallback") + errInvalidSeek = errors.New("invalid setting for seek") + errInvalidSeekSince = errors.New("incompatible setting for since and seek or cursor_seek_fallback") +) func defaultConfig() config { return config{ @@ -113,8 +121,26 @@ func defaultConfig() config { } func (c *config) Validate() error { - if c.CursorSeekFallback != journalread.SeekHead && c.CursorSeekFallback != journalread.SeekTail { + if c.Seek == journalread.SeekInvalid { + return errInvalidSeek + } + switch c.CursorSeekFallback { + case journalread.SeekHead, journalread.SeekTail, journalread.SeekSince: + default: return errInvalidSeekFallback } + if c.Since == nil { + switch { + case c.Seek == journalread.SeekSince, + c.Seek == journalread.SeekCursor && c.CursorSeekFallback == journalread.SeekSince: + return errInvalidSeekSince + default: + return nil + } + } + needSince := c.Seek == journalread.SeekSince || (c.Seek == journalread.SeekCursor && c.CursorSeekFallback == journalread.SeekSince) + if !needSince { + return errInvalidSeekSince + } return nil } diff --git a/filebeat/input/journald/config_test.go b/filebeat/input/journald/config_test.go index e5205d8161d..cf6abe848b5 100644 --- a/filebeat/input/journald/config_test.go +++ b/filebeat/input/journald/config_test.go @@ -20,12 +20,17 @@ package journald import ( + "errors" + "fmt" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + jr "github.com/elastic/beats/v7/filebeat/input/journald/pkg/journalread" conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" ) func TestConfigIncludeMatches(t *testing.T) { @@ -62,3 +67,96 @@ include_matches: verify(t, yaml) }) } + +func TestConfigValidate(t *testing.T) { + t.Run("table", func(t *testing.T) { + + nameOf := [...]string{ + jr.SeekInvalid: "invalid", + jr.SeekHead: "head", + jr.SeekTail: "tail", + jr.SeekCursor: "cursor", + jr.SeekSince: "since", + } + + modes := []jr.SeekMode{ + jr.SeekInvalid, + jr.SeekHead, + jr.SeekTail, + jr.SeekCursor, + jr.SeekSince, + } + const n = jr.SeekSince + 1 + + errSeek := errInvalidSeek + errFall := errInvalidSeekFallback + errSince := errInvalidSeekSince + // Want is the tables of expectations: seek in major, fallback in minor. + want := map[bool][n][n]error{ + false: { // No since option set. 
+ jr.SeekInvalid: {jr.SeekInvalid: errSeek, jr.SeekHead: errSeek, jr.SeekTail: errSeek, jr.SeekCursor: errSeek, jr.SeekSince: errSeek}, + jr.SeekHead: {jr.SeekInvalid: errFall, jr.SeekHead: nil, jr.SeekTail: nil, jr.SeekCursor: errFall, jr.SeekSince: nil}, + jr.SeekTail: {jr.SeekInvalid: errFall, jr.SeekHead: nil, jr.SeekTail: nil, jr.SeekCursor: errFall, jr.SeekSince: nil}, + jr.SeekCursor: {jr.SeekInvalid: errFall, jr.SeekHead: nil, jr.SeekTail: nil, jr.SeekCursor: errFall, jr.SeekSince: errSince}, + jr.SeekSince: {jr.SeekInvalid: errFall, jr.SeekHead: errSince, jr.SeekTail: errSince, jr.SeekCursor: errFall, jr.SeekSince: errSince}, + }, + true: { // Since option set. + jr.SeekInvalid: {jr.SeekInvalid: errSeek, jr.SeekHead: errSeek, jr.SeekTail: errSeek, jr.SeekCursor: errSeek, jr.SeekSince: errSeek}, + jr.SeekHead: {jr.SeekInvalid: errFall, jr.SeekHead: errSince, jr.SeekTail: errSince, jr.SeekCursor: errFall, jr.SeekSince: errSince}, + jr.SeekTail: {jr.SeekInvalid: errFall, jr.SeekHead: errSince, jr.SeekTail: errSince, jr.SeekCursor: errFall, jr.SeekSince: errSince}, + jr.SeekCursor: {jr.SeekInvalid: errFall, jr.SeekHead: errSince, jr.SeekTail: errSince, jr.SeekCursor: errFall, jr.SeekSince: nil}, + jr.SeekSince: {jr.SeekInvalid: errFall, jr.SeekHead: nil, jr.SeekTail: nil, jr.SeekCursor: errFall, jr.SeekSince: nil}, + }, + } + + for setSince := range want { + for _, seek := range modes { + for _, fallback := range modes { + name := fmt.Sprintf("seek_%s_fallback_%s_since_%t", nameOf[seek], nameOf[fallback], setSince) + t.Run(name, func(t *testing.T) { + cfg := config{Seek: seek, CursorSeekFallback: fallback} + if setSince { + cfg.Since = new(time.Duration) + } + got := cfg.Validate() + if !errors.Is(got, want[setSince][seek][fallback]) { + t.Errorf("unexpected error: got:%v want:%v", got, want[setSince][seek][fallback]) + } + }) + } + } + } + }) + + t.Run("use", func(t *testing.T) { + logger := logp.L() + for seek := jr.SeekInvalid; seek <= jr.SeekSince+1; seek++ { + for seekFallback := jr.SeekInvalid; seekFallback <= jr.SeekSince+1; seekFallback++ { + for _, since := range []*time.Duration{nil, new(time.Duration)} { + for _, pos := range []string{"", "defined"} { + // Construct a config with fields checked by Validate. + cfg := config{ + Since: since, + Seek: seek, + CursorSeekFallback: seekFallback, + } + if err := cfg.Validate(); err != nil { + continue + } + + // Confirm we never get to seek since mode with a nil since. + cp := checkpoint{Position: pos} + mode, _ := seekBy(logger, cp, cfg.Seek, cfg.CursorSeekFallback) + if mode == jr.SeekSince { + if cfg.Since == nil { + // If we reach here we would have panicked in Run. 
+							t.Errorf("got nil since in valid seek since mode: seek=%d seek_fallback=%d since=%d cp=%+v",
+								seek, seekFallback, since, cp)
+						}
+					}
+				}
+			}
+		}
+	}
+	})
+}
diff --git a/filebeat/input/journald/input.go b/filebeat/input/journald/input.go
index e4d3d858dd6..c32d677ffa4 100644
--- a/filebeat/input/journald/input.go
+++ b/filebeat/input/journald/input.go
@@ -40,6 +40,7 @@ import (
 type journald struct {
 	Backoff            time.Duration
 	MaxBackoff         time.Duration
+	Since              *time.Duration
 	Seek               journalread.SeekMode
 	CursorSeekFallback journalread.SeekMode
 	Matches            journalfield.IncludeMatches
@@ -104,6 +105,7 @@ func configure(cfg *conf.C) ([]cursor.Source, cursor.Input, error) {
 	return sources, &journald{
 		Backoff:            config.Backoff,
 		MaxBackoff:         config.MaxBackoff,
+		Since:              config.Since,
 		Seek:               config.Seek,
 		CursorSeekFallback: config.CursorSeekFallback,
 		Matches:            journalfield.IncludeMatches(config.Matches),
@@ -140,7 +142,13 @@ func (inp *journald) Run(
 	}
 	defer reader.Close()
 
-	if err := reader.Seek(seekBy(ctx.Logger, currentCheckpoint, inp.Seek, inp.CursorSeekFallback)); err != nil {
+	mode, pos := seekBy(ctx.Logger, currentCheckpoint, inp.Seek, inp.CursorSeekFallback)
+	if mode == journalread.SeekSince {
+		err = reader.SeekRealtimeUsec(uint64(time.Now().Add(*inp.Since).UnixMicro()))
+	} else {
+		err = reader.Seek(mode, pos)
+	}
+	if err != nil {
 		log.Errorf("Continue from current position. Seek failed with: %v", err)
 	}
 
@@ -168,7 +176,10 @@ func (inp *journald) Run(
 func (inp *journald) open(log *logp.Logger, canceler input.Canceler, src cursor.Source) (*journalread.Reader, error) {
 	backoff := backoff.NewExpBackoff(canceler.Done(), inp.Backoff, inp.MaxBackoff)
 	reader, err := journalread.Open(log, src.Name(), backoff,
-		withFilters(inp.Matches), withUnits(inp.Units), withTransports(inp.Transports), withSyslogIdentifiers(inp.Identifiers))
+		withFilters(inp.Matches),
+		withUnits(inp.Units),
+		withTransports(inp.Transports),
+		withSyslogIdentifiers(inp.Identifiers))
 	if err != nil {
 		return nil, sderr.Wrap(err, "failed to create reader for %{path} journal", src.Name())
 	}
@@ -223,12 +234,14 @@ func withSyslogIdentifiers(identifiers []string) func(*sdjournal.Journal) error
 // seekBy tries to find the last known position in the journal, so we can continue collecting
 // from the last known position.
 // The checkpoint is ignored if the user has configured the input to always
-// seek to the head/tail of the journal on startup.
-func seekBy(log *logp.Logger, cp checkpoint, seek, defaultSeek journalread.SeekMode) (journalread.SeekMode, string) {
-	mode := seek
+// seek to the head/tail/since of the journal on startup.
+func seekBy(log *logp.Logger, cp checkpoint, seek, defaultSeek journalread.SeekMode) (mode journalread.SeekMode, pos string) { + mode = seek if mode == journalread.SeekCursor && cp.Position == "" { mode = defaultSeek - if mode != journalread.SeekHead && mode != journalread.SeekTail { + switch mode { + case journalread.SeekHead, journalread.SeekTail, journalread.SeekSince: + default: log.Error("Invalid option for cursor_seek_fallback") mode = journalread.SeekHead } diff --git a/filebeat/input/journald/input_filtering_test.go b/filebeat/input/journald/input_filtering_test.go index 625104a491c..a985f1f5258 100644 --- a/filebeat/input/journald/input_filtering_test.go +++ b/filebeat/input/journald/input_filtering_test.go @@ -23,6 +23,7 @@ import ( "context" "path" "testing" + "time" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -215,3 +216,94 @@ func TestInputIncludeMatches(t *testing.T) { }) } } + +// TestInputSeek test the output of various seek modes while reading +// from input-multiline-parser.journal. +func TestInputSeek(t *testing.T) { + // timeOfFirstEvent is the @timestamp on the "pam_unix" message. + var timeOfFirstEvent = time.Date(2021, time.November, 22, 17, 10, 4, 51729000, time.UTC) + + var allMessages = []string{ + "pam_unix(sudo:session): session closed for user root", + "Started Outputs some log lines.", + "1st line", + "2nd line", + "3rd line", + "4th line", + "5th line", + "6th line", + } + + tests := map[string]struct { + config mapstr.M + expectedMessages []string + }{ + "seek head": { + config: map[string]any{ + "seek": "head", + }, + expectedMessages: allMessages, + }, + "seek tail": { + config: map[string]any{ + "seek": "tail", + }, + expectedMessages: nil, // No messages are expected for seek=tail. + }, + "seek cursor": { + config: map[string]any{ + "seek": "cursor", + }, + expectedMessages: allMessages, + }, + "seek cursor with tail fallback": { + config: map[string]any{ + "seek": "cursor", + "cursor_seek_fallback": "tail", + }, + expectedMessages: nil, // No messages are expected because it will fall back to seek=tail. + }, + "seek since": { + config: map[string]any{ + "seek": "since", + // Query using one microsecond after the first event so that the first event + // is not returned. Note that journald uses microsecond precision for times. + "since": -1 * time.Since(timeOfFirstEvent.Add(time.Microsecond)), + }, + expectedMessages: allMessages[1:], + }, + "seek cursor with since fallback": { + config: map[string]any{ + "seek": "cursor", + "cursor_seek_fallback": "since", + // Query using one microsecond after the first event so that the first event + // is not returned. Note that journald uses microsecond precision for times. 
+ "since": -1 * time.Since(timeOfFirstEvent.Add(time.Microsecond)), + }, + expectedMessages: allMessages[1:], + }, + } + + for name, testCase := range tests { + t.Run(name, func(t *testing.T) { + env := newInputTestingEnvironment(t) + conf := mapstr.M{ + "paths": []string{path.Join("testdata", "input-multiline-parser.journal")}, + } + conf.DeepUpdate(testCase.config) + inp := env.mustCreateInput(conf) + + ctx, cancelInput := context.WithCancel(context.Background()) + env.startInput(ctx, inp) + defer cancelInput() + + env.waitUntilEventCount(len(testCase.expectedMessages)) + + for idx, event := range env.pipeline.GetAllEvents() { + if got, expected := event.Fields["message"], testCase.expectedMessages[idx]; got != expected { + t.Fatalf("expecting event message %q, got %q", expected, got) + } + } + }) + } +} diff --git a/filebeat/input/journald/pkg/journalread/mode.go b/filebeat/input/journald/pkg/journalread/mode.go index 36132ffe11d..3c6fa923361 100644 --- a/filebeat/input/journald/pkg/journalread/mode.go +++ b/filebeat/input/journald/pkg/journalread/mode.go @@ -31,12 +31,15 @@ const ( SeekTail // SeekCursor option seeks to the position specified in the cursor SeekCursor + // SeekSince option seeks to the position specified by the since option + SeekSince ) var seekModes = map[string]SeekMode{ "head": SeekHead, "tail": SeekTail, "cursor": SeekCursor, + "since": SeekSince, } // Unpack validates and unpack "seek" config options. It returns an error if diff --git a/filebeat/input/journald/pkg/journalread/mode_test.go b/filebeat/input/journald/pkg/journalread/mode_test.go index aef0ed4150c..7b323a06be5 100644 --- a/filebeat/input/journald/pkg/journalread/mode_test.go +++ b/filebeat/input/journald/pkg/journalread/mode_test.go @@ -27,6 +27,7 @@ func TestMode_Unpack(t *testing.T) { "head": SeekHead, "tail": SeekTail, "cursor": SeekCursor, + "since": SeekSince, } for str, want := range tests { diff --git a/filebeat/input/journald/pkg/journalread/reader.go b/filebeat/input/journald/pkg/journalread/reader.go index 571de214b55..9994c0aad7c 100644 --- a/filebeat/input/journald/pkg/journalread/reader.go +++ b/filebeat/input/journald/pkg/journalread/reader.go @@ -54,6 +54,7 @@ type journal interface { GetEntry() (*sdjournal.JournalEntry, error) SeekHead() error SeekTail() error + SeekRealtimeUsec(usec uint64) error SeekCursor(string) error } @@ -146,6 +147,13 @@ func (r *Reader) Seek(mode SeekMode, cursor string) (err error) { return err } +// SeekRealtimeUsec moves the read pointer to the entry with the +// specified CLOCK_REALTIME timestamp. This corresponds to +// sd_journal_seek_realtime_usec. +func (r *Reader) SeekRealtimeUsec(usec uint64) error { + return r.journal.SeekRealtimeUsec(usec) +} + // Next reads a new journald entry from the journal. It blocks if there is // currently no entry available in the journal, or until an error has occurred. func (r *Reader) Next(cancel canceler) (*sdjournal.JournalEntry, error) { diff --git a/filebeat/input/mqtt/client.go b/filebeat/input/mqtt/client.go index bece7ee2bbf..f5803d1d340 100644 --- a/filebeat/input/mqtt/client.go +++ b/filebeat/input/mqtt/client.go @@ -28,6 +28,7 @@ func createClientOptions(config mqttInputConfig, onConnectHandler func(client li SetClientID(config.ClientID). SetUsername(config.Username). SetPassword(config.Password). + SetCleanSession(config.CleanSession). SetConnectRetry(true). 
SetOnConnectHandler(onConnectHandler) diff --git a/filebeat/input/mqtt/config.go b/filebeat/input/mqtt/config.go index f78707e53c1..a729ff346af 100644 --- a/filebeat/input/mqtt/config.go +++ b/filebeat/input/mqtt/config.go @@ -28,9 +28,10 @@ type mqttInputConfig struct { Topics []string `config:"topics" validate:"required,min=1"` QoS int `config:"qos" validate:"min=0,max=2"` - ClientID string `config:"client_id" validate:"nonzero"` - Username string `config:"username"` - Password string `config:"password"` + ClientID string `config:"client_id" validate:"nonzero"` + Username string `config:"username"` + Password string `config:"password"` + CleanSession bool `config:"clean_session"` TLS *tlscommon.Config `config:"ssl"` } @@ -38,8 +39,9 @@ type mqttInputConfig struct { // The default config for the mqtt input. func defaultConfig() mqttInputConfig { return mqttInputConfig{ - ClientID: "filebeat", - Topics: []string{"#"}, + ClientID: "filebeat", + Topics: []string{"#"}, + CleanSession: true, } } diff --git a/filebeat/input/syslog/format_check.go b/filebeat/input/syslog/format_check.go index 5ba3e551003..8d646e10f7d 100644 --- a/filebeat/input/syslog/format_check.go +++ b/filebeat/input/syslog/format_check.go @@ -17,7 +17,7 @@ // Code generated by ragel DO NOT EDIT. // -//line parser/format_check.rl:1 +// line-disable-directive parser/format_check.rl:1 package syslog //line format_check.go:8 @@ -27,7 +27,7 @@ const format_check_error int = 0 const format_check_en_main int = 1 -//line parser/format_check.rl:9 +// line-disable-directive parser/format_check.rl:9 func IsRFC5424Format(data []byte) bool { var p, cs int @@ -143,7 +143,7 @@ func IsRFC5424Format(data []byte) bool { } goto st0 tr9: -//line parser/format_check.rl:17 + // line-disable-directive parser/format_check.rl:17 isRFC5424 = true @@ -294,7 +294,7 @@ func IsRFC5424Format(data []byte) bool { } } -//line parser/format_check.rl:28 + // line-disable-directive parser/format_check.rl:28 return isRFC5424 } diff --git a/filebeat/input/syslog/rfc3164_parser.go b/filebeat/input/syslog/rfc3164_parser.go index d717ee7be4c..63d43cdf200 100644 --- a/filebeat/input/syslog/rfc3164_parser.go +++ b/filebeat/input/syslog/rfc3164_parser.go @@ -17,7 +17,7 @@ // Code generated by ragel DO NOT EDIT. 
// -//line parser/rfc3164_parser.rl:1 +// line-disable-directive parser/rfc3164_parser.rl:1 package syslog //line rfc3164_parser.go:8 @@ -28,7 +28,7 @@ const syslog_rfc3154_error int = -1 const syslog_rfc3154_en_main int = 0 const syslog_rfc3154_en_catch_all int = 1 -//line parser/rfc3164_parser.rl:9 +// line-disable-directive parser/rfc3164_parser.rl:9 var ( noDuplicates = []byte{'-', '.'} @@ -332,17 +332,17 @@ func ParserRFC3164(data []byte, event *event) { } goto tr0 tr0: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p goto st2 tr134: -//line parser/common.rl:107 + // line-disable-directive parser/common.rl:107 event.SetSequence(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -355,17 +355,17 @@ func ParserRFC3164(data []byte, event *event) { //line rfc3164_parser.go:342 goto st2 tr1: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p goto st3 tr135: -//line parser/common.rl:107 + // line-disable-directive parser/common.rl:107 event.SetSequence(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -399,17 +399,17 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr14: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p goto st4 tr136: -//line parser/common.rl:107 + // line-disable-directive parser/common.rl:107 event.SetSequence(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -452,7 +452,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr18: -//line parser/common.rl:19 + // line-disable-directive parser/common.rl:19 event.SetYear(data[tok:p]) @@ -468,7 +468,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr19: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -493,7 +493,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr21: -//line parser/common.rl:23 + // line-disable-directive parser/common.rl:23 event.SetMonthNumeric(data[tok:p]) @@ -509,7 +509,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr22: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -542,7 +542,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr24: -//line parser/common.rl:27 + // line-disable-directive parser/common.rl:27 event.SetDay(data[tok:p]) @@ -561,7 +561,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr25: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -586,7 +586,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr28: -//line parser/common.rl:31 + // line-disable-directive parser/common.rl:31 event.SetHour(data[tok:p]) @@ -602,7 +602,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr29: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -627,7 +627,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr31: -//line parser/common.rl:35 + // line-disable-directive parser/common.rl:35 event.SetMinute(data[tok:p]) @@ -643,7 +643,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr32: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -684,19 +684,19 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr34: -//line parser/common.rl:39 + // line-disable-directive parser/common.rl:39 event.SetSecond(data[tok:p]) goto st23 tr61: -//line 
parser/common.rl:103 + // line-disable-directive parser/common.rl:103 event.SetTimeZone(data[tok:p]) goto st23 tr68: -//line parser/common.rl:43 + // line-disable-directive parser/common.rl:43 event.SetNanosecond(data[tok:p]) @@ -732,11 +732,11 @@ func ParserRFC3164(data []byte, event *event) { } goto tr0 tr39: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p -//line parser/common.rl:80 + // line-disable-directive parser/common.rl:80 if p-1 > 0 { for _, b := range noDuplicates { @@ -751,7 +751,7 @@ func ParserRFC3164(data []byte, event *event) { goto st24 tr42: -//line parser/common.rl:80 + // line-disable-directive parser/common.rl:80 if p-1 > 0 { for _, b := range noDuplicates { @@ -796,11 +796,11 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr40: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p -//line parser/common.rl:80 + // line-disable-directive parser/common.rl:80 if p-1 > 0 { for _, b := range noDuplicates { @@ -815,7 +815,7 @@ func ParserRFC3164(data []byte, event *event) { goto st25 tr43: -//line parser/common.rl:80 + // line-disable-directive parser/common.rl:80 if p-1 > 0 { for _, b := range noDuplicates { @@ -867,7 +867,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr45: -//line parser/common.rl:91 + // line-disable-directive parser/common.rl:91 event.SetHostname(data[tok:p]) @@ -891,7 +891,7 @@ func ParserRFC3164(data []byte, event *event) { } goto tr47 tr47: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -917,7 +917,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st27 tr49: -//line parser/common.rl:95 + // line-disable-directive parser/common.rl:95 event.SetProgram(data[tok:p]) @@ -949,7 +949,7 @@ func ParserRFC3164(data []byte, event *event) { st_case_29: goto tr0 tr50: -//line parser/common.rl:95 + // line-disable-directive parser/common.rl:95 event.SetProgram(data[tok:p]) @@ -965,7 +965,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr52: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -984,7 +984,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr54: -//line parser/common.rl:99 + // line-disable-directive parser/common.rl:99 event.SetPid(data[tok:p]) @@ -1012,7 +1012,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr46: -//line parser/common.rl:80 + // line-disable-directive parser/common.rl:80 if p-1 > 0 { for _, b := range noDuplicates { @@ -1025,7 +1025,7 @@ func ParserRFC3164(data []byte, event *event) { } } -//line parser/common.rl:91 + // line-disable-directive parser/common.rl:91 event.SetHostname(data[tok:p]) @@ -1068,7 +1068,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr57: -//line parser/common.rl:80 + // line-disable-directive parser/common.rl:80 if p-1 > 0 { for _, b := range noDuplicates { @@ -1083,7 +1083,7 @@ func ParserRFC3164(data []byte, event *event) { goto st35 tr58: -//line parser/common.rl:80 + // line-disable-directive parser/common.rl:80 if p-1 > 0 { for _, b := range noDuplicates { @@ -1096,7 +1096,7 @@ func ParserRFC3164(data []byte, event *event) { } } -//line parser/common.rl:91 + // line-disable-directive parser/common.rl:91 event.SetHostname(data[tok:p]) @@ -1139,11 +1139,11 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr41: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p -//line parser/common.rl:80 + // line-disable-directive 
parser/common.rl:80 if p-1 > 0 { for _, b := range noDuplicates { @@ -1158,7 +1158,7 @@ func ParserRFC3164(data []byte, event *event) { goto st36 tr44: -//line parser/common.rl:80 + // line-disable-directive parser/common.rl:80 if p-1 > 0 { for _, b := range noDuplicates { @@ -1203,21 +1203,21 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr35: -//line parser/common.rl:39 + // line-disable-directive parser/common.rl:39 event.SetSecond(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p goto st37 tr69: -//line parser/common.rl:43 + // line-disable-directive parser/common.rl:43 event.SetNanosecond(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1286,19 +1286,19 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr37: -//line parser/common.rl:39 + // line-disable-directive parser/common.rl:39 event.SetSecond(data[tok:p]) goto st42 tr65: -//line parser/common.rl:103 + // line-disable-directive parser/common.rl:103 event.SetTimeZone(data[tok:p]) goto st42 tr71: -//line parser/common.rl:43 + // line-disable-directive parser/common.rl:43 event.SetNanosecond(data[tok:p]) @@ -1317,7 +1317,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr63: -//line parser/common.rl:103 + // line-disable-directive parser/common.rl:103 event.SetTimeZone(data[tok:p]) @@ -1341,7 +1341,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr36: -//line parser/common.rl:39 + // line-disable-directive parser/common.rl:39 event.SetSecond(data[tok:p]) @@ -1357,7 +1357,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr67: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1392,21 +1392,21 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr38: -//line parser/common.rl:39 + // line-disable-directive parser/common.rl:39 event.SetSecond(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p goto st46 tr72: -//line parser/common.rl:43 + // line-disable-directive parser/common.rl:43 event.SetNanosecond(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1432,7 +1432,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr26: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1448,17 +1448,17 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr4: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p goto st48 tr137: -//line parser/common.rl:107 + // line-disable-directive parser/common.rl:107 event.SetSequence(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1501,7 +1501,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr77: -//line parser/common.rl:15 + // line-disable-directive parser/common.rl:15 event.SetMonth(data[tok:p]) @@ -1543,7 +1543,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr83: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1562,7 +1562,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr84: -//line parser/common.rl:27 + // line-disable-directive parser/common.rl:27 event.SetDay(data[tok:p]) @@ -1581,7 +1581,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr85: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1606,7 +1606,7 @@ func ParserRFC3164(data 
[]byte, event *event) { } goto st2 tr88: -//line parser/common.rl:31 + // line-disable-directive parser/common.rl:31 event.SetHour(data[tok:p]) @@ -1622,7 +1622,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr89: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1647,7 +1647,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr91: -//line parser/common.rl:35 + // line-disable-directive parser/common.rl:35 event.SetMinute(data[tok:p]) @@ -1663,7 +1663,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr92: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1696,7 +1696,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr94: -//line parser/common.rl:39 + // line-disable-directive parser/common.rl:39 event.SetSecond(data[tok:p]) @@ -1712,7 +1712,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr95: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1739,7 +1739,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr86: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1755,7 +1755,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr80: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1771,7 +1771,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr81: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1795,7 +1795,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr82: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1882,17 +1882,17 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr5: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p goto st75 tr138: -//line parser/common.rl:107 + // line-disable-directive parser/common.rl:107 event.SetSequence(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1968,17 +1968,17 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr6: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p goto st82 tr139: -//line parser/common.rl:107 + // line-disable-directive parser/common.rl:107 event.SetSequence(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -2054,17 +2054,17 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr7: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p goto st89 tr140: -//line parser/common.rl:107 + // line-disable-directive parser/common.rl:107 event.SetSequence(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -2149,17 +2149,17 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr8: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p goto st95 tr141: -//line parser/common.rl:107 + // line-disable-directive parser/common.rl:107 event.SetSequence(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -2216,17 +2216,17 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr9: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p goto st99 tr142: -//line parser/common.rl:107 + // line-disable-directive parser/common.rl:107 event.SetSequence(data[tok:p]) -//line parser/common.rl:3 + // 
line-disable-directive parser/common.rl:3 tok = p @@ -2251,17 +2251,17 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr10: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p goto st101 tr143: -//line parser/common.rl:107 + // line-disable-directive parser/common.rl:107 event.SetSequence(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -2301,17 +2301,17 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr11: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p goto st104 tr144: -//line parser/common.rl:107 + // line-disable-directive parser/common.rl:107 event.SetSequence(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -2360,7 +2360,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr2: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -2442,7 +2442,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr133: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -2482,7 +2482,7 @@ func ParserRFC3164(data []byte, event *event) { } goto tr134 tr3: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -2498,7 +2498,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr145: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -2562,7 +2562,7 @@ func ParserRFC3164(data []byte, event *event) { } goto st2 tr147: -//line parser/common.rl:7 + // line-disable-directive parser/common.rl:7 event.SetPriority(data[tok:p]) @@ -2608,7 +2608,7 @@ func ParserRFC3164(data []byte, event *event) { st_case_1: goto tr12 tr12: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -2994,7 +2994,7 @@ func ParserRFC3164(data []byte, event *event) { if (p) == eof { switch cs { case 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122: -//line parser/common.rl:11 + // line-disable-directive parser/common.rl:11 event.SetMessage(data[tok:p]) @@ -3004,6 +3004,6 @@ func ParserRFC3164(data []byte, event *event) { } -//line parser/rfc3164_parser.rl:28 + // line-disable-directive parser/rfc3164_parser.rl:28 } diff --git a/filebeat/input/syslog/rfc5424_parser.go b/filebeat/input/syslog/rfc5424_parser.go index 7d9761b5ad4..9d935d8c866 100644 --- a/filebeat/input/syslog/rfc5424_parser.go +++ b/filebeat/input/syslog/rfc5424_parser.go @@ -17,7 +17,7 @@ // Code generated by ragel DO NOT EDIT. 
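A note on the mechanical `//line` → `// line-disable-directive` rewrite running through these ragel-generated parsers: in Go, a comment written exactly as `//line file:n` (no space after `//`, at the start of a line) is a compiler directive that remaps source positions to the named file, here the .rl grammar sources that are not shipped with the package. Inserting a space turns it into an ordinary, inert comment. A small self-contained illustration of what the directive does:

package main

import (
	"fmt"
	"runtime"
)

//line parser/common.rl:3
func where() string {
	// Because of the directive above, position info for these lines is
	// remapped, so runtime.Caller reports a parser/common.rl location
	// rather than this .go file.
	_, file, line, _ := runtime.Caller(0)
	return fmt.Sprintf("%s:%d", file, line)
}

func main() {
	fmt.Println(where()) // prints a parser/common.rl position
}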
// -//line parser/rfc5424_parser.rl:1 +// line-disable-directive parser/rfc5424_parser.rl:1 package syslog //line rfc5424_parser.go:8 @@ -27,7 +27,7 @@ const syslog_rfc5424_error int = 0 const syslog_rfc5424_en_main int = 1 -//line parser/rfc5424_parser.rl:9 +// line-disable-directive parser/rfc5424_parser.rl:9 type machineState struct { sd_id string @@ -1266,7 +1266,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr2: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1282,7 +1282,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr5: -//line parser/common.rl:7 + // line-disable-directive parser/common.rl:7 event.SetPriority(data[tok:p]) @@ -1298,7 +1298,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr6: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1317,7 +1317,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr7: -//line parser/common.rl:111 + // line-disable-directive parser/common.rl:111 event.SetVersion(data[tok:p]) @@ -1345,7 +1345,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr586: -//line parser/common.rl:103 + // line-disable-directive parser/common.rl:103 event.SetTimeZone(data[tok:p]) @@ -1361,7 +1361,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr12: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1380,7 +1380,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr13: -//line parser/common.rl:91 + // line-disable-directive parser/common.rl:91 event.SetHostname(data[tok:p]) @@ -1396,7 +1396,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr15: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1415,7 +1415,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr16: -//line parser/common.rl:115 + // line-disable-directive parser/common.rl:115 event.SetAppName(data[tok:p]) @@ -1431,7 +1431,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr18: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1450,7 +1450,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr19: -//line parser/common.rl:119 + // line-disable-directive parser/common.rl:119 event.SetProcID(data[tok:p]) @@ -1466,7 +1466,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr21: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1485,7 +1485,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr22: -//line parser/common.rl:123 + // line-disable-directive parser/common.rl:123 event.SetMsgID(data[tok:p]) @@ -1519,7 +1519,7 @@ func ParserRFC5424(data []byte, event *event) { st_case_589: goto tr600 tr600: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1532,7 +1532,7 @@ func ParserRFC5424(data []byte, event *event) { //line rfc5424_parser.go:1519 goto st590 tr25: -//line parser/common.rl:48 + // line-disable-directive parser/common.rl:48 event.data = EventData{} @@ -1560,7 +1560,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr26: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -1589,7 +1589,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr27: -//line parser/common.rl:64 + // line-disable-directive parser/common.rl:64 state.sd_id = string(data[tok:p]) if _, ok := event.data[state.sd_id]; ok { @@ -1623,7 +1623,7 @@ 
func ParserRFC5424(data []byte, event *event) { } goto st0 tr30: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -2259,7 +2259,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr32: -//line parser/common.rl:56 + // line-disable-directive parser/common.rl:56 state.sd_param_name = string(data[tok:p]) @@ -2289,7 +2289,7 @@ func ParserRFC5424(data []byte, event *event) { } goto tr64 tr64: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -2310,11 +2310,11 @@ func ParserRFC5424(data []byte, event *event) { } goto st54 tr67: -//line parser/common.rl:60 + // line-disable-directive parser/common.rl:60 event.SetData(state.sd_id, state.sd_param_name, data, tok, p, state.sd_value_bs) -//line parser/common.rl:52 + // line-disable-directive parser/common.rl:52 state.sd_value_bs = []int{} @@ -2343,7 +2343,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr29: -//line parser/common.rl:64 + // line-disable-directive parser/common.rl:64 state.sd_id = string(data[tok:p]) if _, ok := event.data[state.sd_id]; ok { @@ -2368,17 +2368,17 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr65: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p -//line parser/common.rl:73 + // line-disable-directive parser/common.rl:73 state.sd_value_bs = append(state.sd_value_bs, p) goto st56 tr68: -//line parser/common.rl:73 + // line-disable-directive parser/common.rl:73 state.sd_value_bs = append(state.sd_value_bs, p) @@ -8565,7 +8565,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr10: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -8608,7 +8608,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr559: -//line parser/common.rl:19 + // line-disable-directive parser/common.rl:19 event.SetYear(data[tok:p]) @@ -8627,7 +8627,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr560: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -8652,7 +8652,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr563: -//line parser/common.rl:23 + // line-disable-directive parser/common.rl:23 event.SetMonthNumeric(data[tok:p]) @@ -8674,7 +8674,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr564: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -8699,7 +8699,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr568: -//line parser/common.rl:27 + // line-disable-directive parser/common.rl:27 event.SetDay(data[tok:p]) @@ -8718,7 +8718,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr569: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -8743,7 +8743,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr572: -//line parser/common.rl:31 + // line-disable-directive parser/common.rl:31 event.SetHour(data[tok:p]) @@ -8759,7 +8759,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr573: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -8784,7 +8784,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr575: -//line parser/common.rl:35 + // line-disable-directive parser/common.rl:35 event.SetMinute(data[tok:p]) @@ -8800,7 +8800,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr576: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -8832,21 +8832,21 
@@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr578: -//line parser/common.rl:39 + // line-disable-directive parser/common.rl:39 event.SetSecond(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p goto st566 tr588: -//line parser/common.rl:43 + // line-disable-directive parser/common.rl:43 event.SetNanosecond(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -8898,21 +8898,21 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr580: -//line parser/common.rl:39 + // line-disable-directive parser/common.rl:39 event.SetSecond(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p goto st571 tr590: -//line parser/common.rl:43 + // line-disable-directive parser/common.rl:43 event.SetNanosecond(data[tok:p]) -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -8928,7 +8928,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr579: -//line parser/common.rl:39 + // line-disable-directive parser/common.rl:39 event.SetSecond(data[tok:p]) @@ -8944,7 +8944,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr587: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -9050,7 +9050,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr570: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -9066,7 +9066,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr565: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -9082,7 +9082,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr566: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -9098,7 +9098,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr561: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -9135,7 +9135,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr3: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -9157,7 +9157,7 @@ func ParserRFC5424(data []byte, event *event) { } goto st0 tr4: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p @@ -10965,16 +10965,16 @@ func ParserRFC5424(data []byte, event *event) { if (p) == eof { switch cs { case 590: -//line parser/common.rl:11 + // line-disable-directive parser/common.rl:11 event.SetMessage(data[tok:p]) case 589: -//line parser/common.rl:3 + // line-disable-directive parser/common.rl:3 tok = p -//line parser/common.rl:11 + // line-disable-directive parser/common.rl:11 event.SetMessage(data[tok:p]) @@ -10987,6 +10987,6 @@ func ParserRFC5424(data []byte, event *event) { } } -//line parser/rfc5424_parser.rl:35 + // line-disable-directive parser/rfc5424_parser.rl:35 } diff --git a/filebeat/input/tcp/input.go b/filebeat/input/tcp/input.go index bd15e1560c8..7911123a86e 100644 --- a/filebeat/input/tcp/input.go +++ b/filebeat/input/tcp/input.go @@ -338,6 +338,9 @@ func procNetTCP(path string, addr []string, hasUnspecified bool, addrIsUnspecifi if len(addr) == 0 { return 0, nil } + if len(addr) != len(addrIsUnspecified) { + return 0, errors.New("mismatched address/unspecified lists: please report this") + } b, err := os.ReadFile(path) if err != nil { return 0, err diff --git a/filebeat/input/udp/input.go b/filebeat/input/udp/input.go index f6f2536614a..c250089b8aa 100644 --- 
a/filebeat/input/udp/input.go +++ b/filebeat/input/udp/input.go @@ -272,7 +272,7 @@ func (m *inputMetrics) poll(addr, addr6 []string, each time.Duration, log *logp. if err != nil { log.Warnf("failed to get initial udp stats from /proc: %v", err) } - rx6, drops6, err := procNetUDP("/proc/net/udp6", addr, hasUnspecified6, addrIsUnspecified6) + rx6, drops6, err := procNetUDP("/proc/net/udp6", addr6, hasUnspecified6, addrIsUnspecified6) if err != nil { log.Warnf("failed to get initial udp6 stats from /proc: %v", err) } @@ -288,7 +288,7 @@ func (m *inputMetrics) poll(addr, addr6 []string, each time.Duration, log *logp. log.Warnf("failed to get udp stats from /proc: %v", err) continue } - rx6, drops6, err := procNetUDP("/proc/net/udp6", addr, hasUnspecified6, addrIsUnspecified6) + rx6, drops6, err := procNetUDP("/proc/net/udp6", addr6, hasUnspecified6, addrIsUnspecified6) if err != nil { log.Warnf("failed to get udp6 stats from /proc: %v", err) continue @@ -333,6 +333,9 @@ func procNetUDP(path string, addr []string, hasUnspecified bool, addrIsUnspecifi if len(addr) == 0 { return 0, 0, nil } + if len(addr) != len(addrIsUnspecified) { + return 0, 0, errors.New("mismatched address/unspecified lists: please report this") + } b, err := os.ReadFile(path) if err != nil { return 0, 0, err diff --git a/filebeat/input/v2/compat/compat.go b/filebeat/input/v2/compat/compat.go index 6c783cfb703..a8d2c0e8cb2 100644 --- a/filebeat/input/v2/compat/compat.go +++ b/filebeat/input/v2/compat/compat.go @@ -22,6 +22,7 @@ package compat import ( "context" + "errors" "fmt" "sync" @@ -127,7 +128,7 @@ func (r *runner) Start() { }, r.connector, ) - if err != nil { + if err != nil && !errors.Is(err, context.Canceled) { log.Errorf("Input '%s' failed with: %+v", name, err) } else { log.Infof("Input '%s' stopped (goroutine)", name) diff --git a/filebeat/magefile.go b/filebeat/magefile.go index 874d47efae8..d96b44f4c25 100644 --- a/filebeat/magefile.go +++ b/filebeat/magefile.go @@ -205,6 +205,7 @@ func IntegTest() { // GoIntegTest starts the docker containers and executes the Go integration tests. 
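Stepping back to the compat.go change above: treating a wrapped context.Canceled as a normal stop rather than a failure relies on `errors.Is` walking the `%w` wrap chain. A standalone sketch of the pattern:

package main

import (
	"context"
	"errors"
	"fmt"
)

// run blocks until its context ends, then returns the cause wrapped
// with extra detail, mimicking how an input might fail on shutdown.
func run(ctx context.Context) error {
	<-ctx.Done()
	return fmt.Errorf("input stopped: %w", ctx.Err())
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate shutdown
	err := run(ctx)
	// errors.Is unwraps the %w chain, so the wrapped context.Canceled is
	// still recognized and reported as a clean stop instead of an error.
	if err != nil && !errors.Is(err, context.Canceled) {
		fmt.Println("failed:", err)
	} else {
		fmt.Println("stopped")
	}
}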
func GoIntegTest(ctx context.Context) error { + mg.Deps(BuildSystemTestBinary) return devtools.GoIntegTestFromHost(ctx, devtools.DefaultGoTestIntegrationFromHostArgs()) } diff --git a/filebeat/modules.d/apache.yml.disabled b/filebeat/modules.d/apache.yml.disabled index d4fbc61659d..cd58ed77b3c 100644 --- a/filebeat/modules.d/apache.yml.disabled +++ b/filebeat/modules.d/apache.yml.disabled @@ -1,5 +1,5 @@ # Module: apache -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-apache.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-apache.html - module: apache # Access logs diff --git a/filebeat/modules.d/auditd.yml.disabled b/filebeat/modules.d/auditd.yml.disabled index 8bcedafdee9..b63d14ffc27 100644 --- a/filebeat/modules.d/auditd.yml.disabled +++ b/filebeat/modules.d/auditd.yml.disabled @@ -1,5 +1,5 @@ # Module: auditd -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-auditd.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-auditd.html - module: auditd log: diff --git a/filebeat/modules.d/elasticsearch.yml.disabled b/filebeat/modules.d/elasticsearch.yml.disabled index 75236f1a664..33ea085f784 100644 --- a/filebeat/modules.d/elasticsearch.yml.disabled +++ b/filebeat/modules.d/elasticsearch.yml.disabled @@ -1,5 +1,5 @@ # Module: elasticsearch -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-elasticsearch.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-elasticsearch.html - module: elasticsearch # Server log diff --git a/filebeat/modules.d/haproxy.yml.disabled b/filebeat/modules.d/haproxy.yml.disabled index 5863c5bbdf8..cb0a107fb5f 100644 --- a/filebeat/modules.d/haproxy.yml.disabled +++ b/filebeat/modules.d/haproxy.yml.disabled @@ -1,5 +1,5 @@ # Module: haproxy -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-haproxy.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-haproxy.html - module: haproxy # All logs diff --git a/filebeat/modules.d/icinga.yml.disabled b/filebeat/modules.d/icinga.yml.disabled index 10ab79616eb..1f0ba5e4de4 100644 --- a/filebeat/modules.d/icinga.yml.disabled +++ b/filebeat/modules.d/icinga.yml.disabled @@ -1,5 +1,5 @@ # Module: icinga -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-icinga.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-icinga.html - module: icinga # Main logs diff --git a/filebeat/modules.d/iis.yml.disabled b/filebeat/modules.d/iis.yml.disabled index 868fadedbb0..6be750c8701 100644 --- a/filebeat/modules.d/iis.yml.disabled +++ b/filebeat/modules.d/iis.yml.disabled @@ -1,5 +1,5 @@ # Module: iis -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-iis.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-iis.html - module: iis # Access logs diff --git a/filebeat/modules.d/kafka.yml.disabled b/filebeat/modules.d/kafka.yml.disabled index fd7b0013739..0cc4fbf9fe3 100644 --- a/filebeat/modules.d/kafka.yml.disabled +++ b/filebeat/modules.d/kafka.yml.disabled @@ -1,5 +1,5 @@ # Module: kafka -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-kafka.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-kafka.html - module: kafka # All logs diff --git a/filebeat/modules.d/kibana.yml.disabled b/filebeat/modules.d/kibana.yml.disabled index 
bc34de819a5..5ade4bf1439 100644 --- a/filebeat/modules.d/kibana.yml.disabled +++ b/filebeat/modules.d/kibana.yml.disabled @@ -1,5 +1,5 @@ # Module: kibana -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-kibana.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-kibana.html - module: kibana # Server logs diff --git a/filebeat/modules.d/logstash.yml.disabled b/filebeat/modules.d/logstash.yml.disabled index fe99eeabae4..501b8bc3321 100644 --- a/filebeat/modules.d/logstash.yml.disabled +++ b/filebeat/modules.d/logstash.yml.disabled @@ -1,5 +1,5 @@ # Module: logstash -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-logstash.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-logstash.html - module: logstash # logs diff --git a/filebeat/modules.d/mongodb.yml.disabled b/filebeat/modules.d/mongodb.yml.disabled index ac31f64bed1..4180e598582 100644 --- a/filebeat/modules.d/mongodb.yml.disabled +++ b/filebeat/modules.d/mongodb.yml.disabled @@ -1,5 +1,5 @@ # Module: mongodb -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-mongodb.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-mongodb.html - module: mongodb # All logs diff --git a/filebeat/modules.d/mysql.yml.disabled b/filebeat/modules.d/mysql.yml.disabled index dd5079648bc..b2c42d1f1cd 100644 --- a/filebeat/modules.d/mysql.yml.disabled +++ b/filebeat/modules.d/mysql.yml.disabled @@ -1,5 +1,5 @@ # Module: mysql -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-mysql.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-mysql.html - module: mysql # Error logs diff --git a/filebeat/modules.d/nats.yml.disabled b/filebeat/modules.d/nats.yml.disabled index 6074f499cad..2cfa45e5c19 100644 --- a/filebeat/modules.d/nats.yml.disabled +++ b/filebeat/modules.d/nats.yml.disabled @@ -1,5 +1,5 @@ # Module: nats -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-nats.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-nats.html - module: nats # All logs diff --git a/filebeat/modules.d/nginx.yml.disabled b/filebeat/modules.d/nginx.yml.disabled index 450b30c0e01..709e52630e9 100644 --- a/filebeat/modules.d/nginx.yml.disabled +++ b/filebeat/modules.d/nginx.yml.disabled @@ -1,5 +1,5 @@ # Module: nginx -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-nginx.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-nginx.html - module: nginx # Access logs diff --git a/filebeat/modules.d/osquery.yml.disabled b/filebeat/modules.d/osquery.yml.disabled index 0740b774a52..2def611ecbb 100644 --- a/filebeat/modules.d/osquery.yml.disabled +++ b/filebeat/modules.d/osquery.yml.disabled @@ -1,5 +1,5 @@ # Module: osquery -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-osquery.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-osquery.html - module: osquery result: diff --git a/filebeat/modules.d/pensando.yml.disabled b/filebeat/modules.d/pensando.yml.disabled index 1002b61bf3e..18a8b7d4efe 100644 --- a/filebeat/modules.d/pensando.yml.disabled +++ b/filebeat/modules.d/pensando.yml.disabled @@ -1,5 +1,5 @@ # Module: pensando -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-pensando.html +# Docs: 
https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-pensando.html - module: pensando # Firewall logs diff --git a/filebeat/modules.d/postgresql.yml.disabled b/filebeat/modules.d/postgresql.yml.disabled index 5df32fefc49..bec77dc84f7 100644 --- a/filebeat/modules.d/postgresql.yml.disabled +++ b/filebeat/modules.d/postgresql.yml.disabled @@ -1,5 +1,5 @@ # Module: postgresql -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-postgresql.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-postgresql.html - module: postgresql # All logs diff --git a/filebeat/modules.d/redis.yml.disabled b/filebeat/modules.d/redis.yml.disabled index dfec32f8849..31b022d2bc9 100644 --- a/filebeat/modules.d/redis.yml.disabled +++ b/filebeat/modules.d/redis.yml.disabled @@ -1,5 +1,5 @@ # Module: redis -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-redis.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-redis.html - module: redis # Main logs diff --git a/filebeat/modules.d/santa.yml.disabled b/filebeat/modules.d/santa.yml.disabled index 9655b1afb59..4707b903ce8 100644 --- a/filebeat/modules.d/santa.yml.disabled +++ b/filebeat/modules.d/santa.yml.disabled @@ -1,5 +1,5 @@ # Module: santa -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-santa.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-santa.html - module: santa log: diff --git a/filebeat/modules.d/system.yml.disabled b/filebeat/modules.d/system.yml.disabled index 4171c65f7ad..1302c6374da 100644 --- a/filebeat/modules.d/system.yml.disabled +++ b/filebeat/modules.d/system.yml.disabled @@ -1,5 +1,5 @@ # Module: system -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-system.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-system.html - module: system # Syslog diff --git a/filebeat/modules.d/traefik.yml.disabled b/filebeat/modules.d/traefik.yml.disabled index 440028cc182..cc65ce2de9c 100644 --- a/filebeat/modules.d/traefik.yml.disabled +++ b/filebeat/modules.d/traefik.yml.disabled @@ -1,5 +1,5 @@ # Module: traefik -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-traefik.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-traefik.html - module: traefik # Access logs diff --git a/filebeat/registrar/registrar.go b/filebeat/registrar/registrar.go index c7043da249c..3ba8427e55f 100644 --- a/filebeat/registrar/registrar.go +++ b/filebeat/registrar/registrar.go @@ -23,8 +23,6 @@ import ( "sync" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/filebeat/input/file" "github.com/elastic/beats/v7/libbeat/statestore" "github.com/elastic/beats/v7/libbeat/statestore/backend" @@ -102,7 +100,7 @@ func (r *Registrar) GetStates() []file.State { func (r *Registrar) loadStates() error { states, err := readStatesFrom(r.store) if err != nil { - return errors.Wrap(err, "can not load filebeat registry state") + return fmt.Errorf("can not load filebeat registry state: %w", err) } r.states.SetStates(states) diff --git a/go.mod b/go.mod index 99611863897..0d7fcad73ae 100644 --- a/go.mod +++ b/go.mod @@ -47,7 +47,7 @@ require ( github.com/bsm/sarama-cluster v2.1.14-0.20180625083203-7e67d87a6b3f+incompatible github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e // indirect github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e - 
github.com/cespare/xxhash/v2 v2.1.2 + github.com/cespare/xxhash/v2 v2.2.0 github.com/cloudfoundry-community/go-cfclient v0.0.0-20190808214049-35bcce23fc5f github.com/cloudfoundry/noaa v2.1.0+incompatible github.com/cloudfoundry/sonde-go v0.0.0-20171206171820-b33733203bb4 @@ -69,7 +69,7 @@ require ( github.com/dustin/go-humanize v1.0.1 github.com/eapache/go-resiliency v1.2.0 github.com/eclipse/paho.mqtt.golang v1.3.5 - github.com/elastic/elastic-agent-client/v7 v7.1.2 + github.com/elastic/elastic-agent-client/v7 v7.2.0 github.com/elastic/go-concert v0.2.0 github.com/elastic/go-libaudit/v2 v2.3.2 github.com/elastic/go-licenser v0.4.1 @@ -117,7 +117,7 @@ require ( github.com/josephspurrier/goversioninfo v0.0.0-20190209210621-63e6d1acd3dd github.com/jpillora/backoff v1.0.0 // indirect github.com/lib/pq v1.10.3 - github.com/magefile/mage v1.14.0 + github.com/magefile/mage v1.15.0 github.com/mattn/go-colorable v0.1.12 github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe // indirect github.com/miekg/dns v1.1.42 @@ -158,14 +158,14 @@ require ( golang.org/x/net v0.9.0 golang.org/x/oauth2 v0.7.0 golang.org/x/sync v0.1.0 - golang.org/x/sys v0.7.0 + golang.org/x/sys v0.9.0 golang.org/x/text v0.9.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.6.0 google.golang.org/api v0.103.0 - google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 - google.golang.org/grpc v1.51.0 - google.golang.org/protobuf v1.28.1 + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f + google.golang.org/grpc v1.53.0 + google.golang.org/protobuf v1.29.1 gopkg.in/inf.v0 v0.9.1 gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect @@ -182,8 +182,8 @@ require ( ) require ( - cloud.google.com/go v0.105.0 - cloud.google.com/go/compute v1.14.0 + cloud.google.com/go v0.107.0 + cloud.google.com/go/compute v1.15.1 cloud.google.com/go/redis v1.10.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 @@ -205,8 +205,8 @@ require ( github.com/elastic/elastic-agent-libs v0.3.9 github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 github.com/elastic/elastic-agent-system-metrics v0.6.1 - github.com/elastic/go-elasticsearch/v8 v8.8.1 - github.com/elastic/mito v1.4.0 + github.com/elastic/go-elasticsearch/v8 v8.9.0 + github.com/elastic/mito v1.5.0 github.com/elastic/toutoumomoma v0.0.0-20221026030040-594ef30cb640 github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15 github.com/google/cel-go v0.15.3 @@ -224,7 +224,7 @@ require ( require ( aqwari.net/xml v0.0.0-20210331023308-d9421b293817 // indirect - cloud.google.com/go/compute/metadata v0.2.1 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v0.8.0 // indirect cloud.google.com/go/longrunning v0.3.0 // indirect code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f // indirect diff --git a/go.sum b/go.sum index 4bdb8d57fdf..55d3696c75e 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= 
+cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -44,10 +44,10 @@ cloud.google.com/go/bigquery v1.44.0 h1:Wi4dITi+cf9VYp4VH2T9O41w0kCW0uQTELq2Z6tu cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/bigtable v1.3.0/go.mod h1:z5EyKrPE8OQmeg4h5MNdKvuSnI9CCT49Ki3f23aBzio= -cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0= -cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datacatalog v1.8.0 h1:6kZ4RIOW/uT7QWC5SfPfq/G8sYzr/v+UOmOAxy4Z1TE= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= @@ -377,8 +377,9 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -518,14 +519,15 @@ github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3 h1:lnDkqiRFKm0rxdljqr github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3/go.mod h1:aPqzac6AYkipvp4hufTyMj5PDIphF3+At8zr7r51xjY= github.com/elastic/elastic-agent-autodiscover v0.6.2 h1:7P3cbMBWXjbzA80rxitQjc+PiWyZ4I4F4LqrCYgYlNc= github.com/elastic/elastic-agent-autodiscover v0.6.2/go.mod h1:yXYKFAG+Py+TcE4CCR8EAbJiYb+6Dz9sCDoWgOveqtU= -github.com/elastic/elastic-agent-client/v7 v7.1.2 h1:p6KvvDMoFCBPvchxcx9cRXpRjsDaii0m/wE3lqQxpmM= -github.com/elastic/elastic-agent-client/v7 v7.1.2/go.mod h1:G3Mk1pHXxvj3wC5FvsGUlPOsvapTB5SfrUmWiJDXT6Q= +github.com/elastic/elastic-agent-client/v7 v7.2.0 
h1:WCPu7t+NGrnjzfu76FxyZpAzoQirIEBn1ne6kKHacIU= +github.com/elastic/elastic-agent-client/v7 v7.2.0/go.mod h1:9/amG2K2y2oqx39zURcc+hnqcX+nyJ1cZrLgzsgo5c0= github.com/elastic/elastic-agent-libs v0.3.9 h1:2xbZUOM20Q9ni3dkNjj8r274fub22SuLqi6SedknV7g= github.com/elastic/elastic-agent-libs v0.3.9/go.mod h1:Fy5QqIOax0EOVeQJ2l5Ux+GmJsX549Obllru5R1bHLI= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 h1:sb+25XJn/JcC9/VL8HX4r4QXSUq4uTNzGS2kxOE7u1U= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3/go.mod h1:rWarFM7qYxJKsi9WcV6ONcFjH/NA3niDNpTxO+8/GVI= github.com/elastic/elastic-agent-system-metrics v0.6.1 h1:LCN1lvQTkdUuU/rKlpKyVMDU/G/I8/iZWCaW6K+mo4o= github.com/elastic/elastic-agent-system-metrics v0.6.1/go.mod h1:Bj8XM/uNKm553blQHkGNEICRLGnVEtw8yttmV5vBngA= +github.com/elastic/elastic-transport-go/v8 v8.0.0-20230329154755-1a3c63de0db6/go.mod h1:87Tcz8IVNe6rVSLdBux1o/PEItLtyabHU3naC7IoqKI= github.com/elastic/elastic-transport-go/v8 v8.3.0 h1:DJGxovyQLXGr62e9nDMPSxRyWION0Bh6d9eCFBriiHo= github.com/elastic/elastic-transport-go/v8 v8.3.0/go.mod h1:87Tcz8IVNe6rVSLdBux1o/PEItLtyabHU3naC7IoqKI= github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 h1:cWPqxlPtir4RoQVCpGSRXmLqjEHpJKbR60rxh1nQZY4= @@ -533,8 +535,8 @@ github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270/go.mod h1:Msl1pdb github.com/elastic/glog v1.0.1-0.20210831205241-7d8b5c89dfc4/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/elastic/go-concert v0.2.0 h1:GAQrhRVXprnNjtvTP9pWJ1d4ToEA4cU5ci7TwTa20xg= github.com/elastic/go-concert v0.2.0/go.mod h1:HWjpO3IAEJUxOeaJOWXWEp7imKd27foxz9V5vegC/38= -github.com/elastic/go-elasticsearch/v8 v8.8.1 h1:/OiP5Yex40q5eWpzFVQIS8jRE7SaEZrFkG9JbE6TXtY= -github.com/elastic/go-elasticsearch/v8 v8.8.1/go.mod h1:GU1BJHO7WeamP7UhuElYwzzHtvf9SDmeVpSSy9+o6Qg= +github.com/elastic/go-elasticsearch/v8 v8.9.0 h1:8xtmYjUkqtahl50E0Bg/wjKI7K63krJrrLipbNj/fCU= +github.com/elastic/go-elasticsearch/v8 v8.9.0/go.mod h1:NGmpvohKiRHXI0Sw4fuUGn6hYOmAXlyCphKpzVBiqDE= github.com/elastic/go-libaudit/v2 v2.3.2 h1:qWNcA3nkwNEGh1UBDbDTVF55KR6SM1W2Ji1LGDqFEpw= github.com/elastic/go-libaudit/v2 v2.3.2/go.mod h1:+ZE0czqmbqtnRkl0fNgpI+HvVVRo/ZMJdcXv/PaKcOo= github.com/elastic/go-licenser v0.4.0/go.mod h1:V56wHMpmdURfibNBggaSBfqgPxyT1Tldns1i87iTEvU= @@ -564,8 +566,8 @@ github.com/elastic/gopacket v1.1.20-0.20211202005954-d412fca7f83a h1:8WfL/X6fK11 github.com/elastic/gopacket v1.1.20-0.20211202005954-d412fca7f83a/go.mod h1:riddUzxTSBpJXk3qBHtYr4qOhFhT6k/1c0E3qkQjQpA= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/elastic/mito v1.4.0 h1:RgosnKOfjIIBeKfSP7h1x6YHrqfBNaao8I5+BMmKXpE= -github.com/elastic/mito v1.4.0/go.mod h1:J0LW+SbpiAoiBUBEBrbH8epwNDFhWWgEWyR/9DpY04c= +github.com/elastic/mito v1.5.0 h1:637UzhwJH8XfHgusGrpL9b7sTkDE+gJ4unf1tDPDtUE= +github.com/elastic/mito v1.5.0/go.mod h1:J0LW+SbpiAoiBUBEBrbH8epwNDFhWWgEWyR/9DpY04c= github.com/elastic/ristretto v0.1.1-0.20220602190459-83b0895ca5b3 h1:ChPwRVv1RR4a0cxoGjKcyWjTEpxYfm5gydMIzo32cAw= github.com/elastic/ristretto v0.1.1-0.20220602190459-83b0895ca5b3/go.mod h1:RAy2GVV4sTWVlNMavv3xhLsk18rxhfhDnombTe6EF5c= github.com/elastic/sarama v1.19.1-0.20220310193331-ebc2b0d8eef3 h1:FzA0/n4iMt8ojGDGRoiFPSHFvvdVIvxOxyLtiFnrLBM= @@ -1176,8 +1178,8 @@ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2 
github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= -github.com/magefile/mage v1.14.0 h1:6QDX3g6z1YvJ4olPhT1wksUcSa/V0a1B+pJb73fBjyo= -github.com/magefile/mage v1.14.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= +github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -2017,8 +2019,8 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2261,8 +2263,8 @@ google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70= -google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -2297,8 +2299,8 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.51.0 
h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2313,8 +2315,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/heartbeat/Dockerfile b/heartbeat/Dockerfile index 4f063983998..7c1608c9aad 100644 --- a/heartbeat/Dockerfile +++ b/heartbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19.10 +FROM golang:1.19.12 RUN \ apt-get update \ @@ -10,7 +10,7 @@ RUN \ && rm -rf /var/lib/apt/lists/* # Use a virtualenv to avoid the PEP668 "externally managed environment" error caused by conflicts -# with the system Python installation. golang:1.19.10 uses Debian 12 which now enforces PEP668. +# with the system Python installation. golang:1.19.12 uses Debian 12 which now enforces PEP668. ENV VIRTUAL_ENV=/opt/venv RUN python3 -m venv $VIRTUAL_ENV ENV PATH="$VIRTUAL_ENV/bin:$PATH" diff --git a/heartbeat/_meta/config/beat.reference.yml.tmpl b/heartbeat/_meta/config/beat.reference.yml.tmpl index bfff82681ce..ec75513e3f0 100644 --- a/heartbeat/_meta/config/beat.reference.yml.tmpl +++ b/heartbeat/_meta/config/beat.reference.yml.tmpl @@ -10,7 +10,7 @@ ############################# Heartbeat ###################################### -# Define a directory to load monitor definitions from. Definitions take the form +# Define a directory from which to load monitor definitions. Definitions take the form # of individual yaml files. heartbeat.config.monitors: # Directory + glob pattern to search for configuration files @@ -25,7 +25,7 @@ heartbeat.monitors: - type: icmp # monitor type `icmp` (requires root) uses ICMP Echo Request to ping # configured hosts - # ID used to uniquely identify this monitor in elasticsearch even if the config changes + # ID used to uniquely identify this monitor in Elasticsearch even if the config changes id: my-monitor # Human readable display name for this service in Uptime UI and elsewhere @@ -43,7 +43,7 @@ heartbeat.monitors: # List of hosts to ping hosts: ["localhost"] - # Configure IP protocol types to ping on if hostnames are configured.
+ # Configure IP protocol types to ping if hostnames are configured. # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. ipv4: true ipv6: true @@ -55,7 +55,7 @@ heartbeat.monitors: # Waiting duration until another ICMP Echo Request is emitted. wait: 1s - # The tags of the monitors are included in their own field with each + # The tags of the monitors are included in a dedicated field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] @@ -81,9 +81,9 @@ heartbeat.monitors: # How often to check for changes #reload.period: 1s -- type: tcp # monitor type `tcp`. Connect via TCP and optionally verify endpoint +- type: tcp # monitor type `tcp`. Connect via TCP and optionally verify the endpoint # by sending/receiving a custom payload - # ID used to uniquely identify this monitor in elasticsearch even if the config changes + # ID used to uniquely identify this monitor in Elasticsearch even if the config changes id: my-monitor # Human readable display name for this service in Uptime UI and elsewhere @@ -97,23 +97,23 @@ heartbeat.monitors: # configure hosts to ping. # Entries can be: - # - plain host name or IP like `localhost`: + # - plain hostname or IP like `localhost`: # Requires ports configs to be checked. If ssl is configured, - # a SSL/TLS based connection will be established. Otherwise plain tcp connection + # an SSL/TLS based connection will be established. Otherwise plain tcp connection # will be established # - hostname + port like `localhost:12345`: - # Connect to port on given host. If ssl is configured, - # a SSL/TLS based connection will be established. Otherwise plain tcp connection + # Connect to port on a given host. If ssl is configured, + # an SSL/TLS based connection will be established. Otherwise plain tcp connection # will be established # - full url syntax. `scheme://<host>:[port]`. The `<scheme>` can be one of - # `tcp`, `plain`, `ssl` and `tls`. If `tcp`, `plain` is configured, a plain + # `tcp`, `plain`, `ssl`, and `tls`. If `tcp`, `plain` is configured, a plain # tcp connection will be established, even if ssl is configured. # Using `tls`/`ssl`, an SSL connection is established. If no ssl is configured, # system defaults will be used (not supported on windows). - # If `port` is missing in url, the ports setting is required. + # If `port` is missing in the url, the `ports` setting is required. hosts: ["localhost:9200"] - # Configure IP protocol types to ping on if hostnames are configured. + # Configure IP protocol types to ping if hostnames are configured. # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. ipv4: true ipv6: true @@ -126,10 +126,10 @@ heartbeat.monitors: #timeout: 16s # Optional payload string to send to remote and expected answer. If none is - # configured, the endpoint is expected to be up if connection attempt was + # configured, the endpoint is expected to be up if a connection attempt was # successful. If only `send_string` is configured, any response will be # accepted as ok. If only `receive_string` is configured, no payload will be - # send, but client expects to receive expected payload on connect. + # sent, but the client expects to receive the expected payload on connect. #check: #send: '' #receive: '' @@ -159,8 +159,8 @@ heartbeat.monitors: # Set to true to publish fields with null values in events. #keep_null: false -- type: http # monitor type `http`.
Connect via HTTP an optionally verify response - # ID used to uniquely identify this monitor in elasticsearch even if the config changes +- type: http # monitor type `http`. Connect via HTTP and optionally verify the response + # ID used to uniquely identify this monitor in Elasticsearch even if the config changes. id: my-http-monitor # Human readable display name for this service in Uptime UI and elsewhere @@ -170,12 +170,12 @@ heartbeat.monitors: #enabled: true # Configure task schedule - schedule: '@every 5s' # every 5 seconds from start of beat + schedule: '@every 5s' # every 5 seconds from the start of beat # Configure URLs to ping urls: ["http://localhost:9200"] - # Configure IP protocol types to ping on if hostnames are configured. + # Configure IP protocol types to ping if hostnames are configured. # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. ipv4: true ipv6: true @@ -191,7 +191,7 @@ heartbeat.monitors: #username: '' #password: '' - # TLS/SSL connection settings for use with HTTPS endpoint. If not configured + # TLS/SSL connection settings for use with HTTPS endpoint. If not configured, # system defaults will be used. #ssl: # Certificate Authorities @@ -202,7 +202,7 @@ heartbeat.monitors: # Request settings: #check.request: - # Configure HTTP method to use. Only 'HEAD', 'GET' and 'POST' methods are allowed. + # Configure HTTP method to use. Only 'HEAD', 'GET', and 'POST' methods are allowed. #method: "GET" # Dictionary of additional HTTP headers to send: @@ -247,17 +247,17 @@ heartbeat.monitors: #keep_null: false heartbeat.scheduler: - # Limit number of concurrent tasks executed by heartbeat. The task limit if + # Limit the number of concurrent tasks executed by heartbeat. The task limit is # disabled if set to 0. The default is 0. #limit: 0 - # Set the scheduler it's time zone + # Set the scheduler's time zone #location: '' heartbeat.jobs: # Limit the number of concurrent monitors executed by heartbeat. This differs from # heartbeat.scheduler.limit in that it maps to individual monitors rather than the - # subtasks of monitors. For non-browser monitors a subtask usually corresponds to a + # subtasks of monitors. For non-browser monitors, a subtask usually corresponds to a # single file descriptor. # This feature is most useful for the browser type #browser.limit: 1 diff --git a/heartbeat/_meta/config/beat.yml.tmpl b/heartbeat/_meta/config/beat.yml.tmpl index 30a3660ef51..0d720317fb7 100644 --- a/heartbeat/_meta/config/beat.yml.tmpl +++ b/heartbeat/_meta/config/beat.yml.tmpl @@ -9,7 +9,7 @@ ############################# Heartbeat ###################################### -# Define a directory to load monitor definitions from. Definitions take the form +# Define a directory from which to load monitor definitions. Definitions take the form # of individual yaml files.
heartbeat.config.monitors: # Directory + glob pattern to search for configuration files @@ -22,13 +22,13 @@ heartbeat.config.monitors: # Configure monitors inline heartbeat.monitors: - type: http - # Set enabled to true (or delete the following line) to enable this example monitor + # Set enabled to true (or delete the following line) to enable this monitor enabled: false - # ID used to uniquely identify this monitor in elasticsearch even if the config changes + # ID used to uniquely identify this monitor in Elasticsearch even if the config changes id: my-monitor # Human readable display name for this service in Uptime UI and elsewhere name: My Monitor - # List or urls to query + # List of URLs to query urls: ["http://localhost:9200"] # Configure task schedule schedule: '@every 10s' diff --git a/heartbeat/beater/heartbeat.go b/heartbeat/beater/heartbeat.go index 26a58af7301..71aae20668a 100644 --- a/heartbeat/beater/heartbeat.go +++ b/heartbeat/beater/heartbeat.go @@ -44,7 +44,6 @@ import ( "github.com/elastic/beats/v7/libbeat/common/reload" "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" "github.com/elastic/beats/v7/libbeat/management" - "github.com/elastic/beats/v7/libbeat/publisher/pipeline" ) // Heartbeat represents the root datastructure of this beat. @@ -117,17 +116,8 @@ func New(b *beat.Beat, rawConfig *conf.C) (beat.Beater, error) { sched := scheduler.Create(limit, hbregistry.SchedulerRegistry, location, jobConfig, parsedConfig.RunOnce) - pipelineClientFactory := func(p beat.Pipeline) (pipeline.ISyncClient, error) { - if parsedConfig.RunOnce { - client, err := pipeline.NewSyncClient(logp.L(), p, beat.ClientConfig{}) - if err != nil { - return nil, fmt.Errorf("could not create pipeline sync client for run_once: %w", err) - } - return client, nil - } else { - client, err := p.Connect() - return monitors.SyncPipelineClientAdaptor{C: client}, err - } + pipelineClientFactory := func(p beat.Pipeline) (beat.Client, error) { + return p.Connect() } bt := &Heartbeat{ @@ -160,23 +150,33 @@ func (bt *Heartbeat) Run(b *beat.Beat) error { bt.trace.Start() defer bt.trace.Close() + // Adapt local pipeline to synchronized mode if run_once is enabled + pipeline := b.Publisher + var pipelineWrapper monitors.PipelineWrapper = &monitors.NoopPipelineWrapper{} + if bt.config.RunOnce { + sync := &monitors.SyncPipelineWrapper{} + + pipeline = monitors.WithSyncPipelineWrapper(pipeline, sync) + pipelineWrapper = sync + } + logp.L().Info("heartbeat is running! Hit CTRL-C to stop it.") groups, _ := syscall.Getgroups() logp.L().Info("Effective user/group ids: %d/%d, with groups: %v", syscall.Geteuid(), syscall.Getegid(), groups) + waitMonitors := monitors.NewSignalWait() + // It is important this appear before we check for run once mode // In run once mode we depend on these monitors being loaded, but not other more // dynamic types. 
- stopStaticMonitors, err := bt.RunStaticMonitors(b) + stopStaticMonitors, err := bt.RunStaticMonitors(b, pipeline) if err != nil { return err } defer stopStaticMonitors() if bt.config.RunOnce { - bt.scheduler.WaitForRunOnce() - logp.L().Info("Ending run_once run") - return nil + waitMonitors.Add(monitors.WithLog(bt.scheduler.WaitForRunOnce, "Ending run_once run.")) } if b.Manager.Enabled() { @@ -211,20 +211,34 @@ func (bt *Heartbeat) Run(b *beat.Beat) error { defer bt.scheduler.Stop() - <-bt.done + // Wait until run_once ends or bt is being shut down + waitMonitors.AddChan(bt.done) + waitMonitors.Wait() - if err != nil { - logp.L().Errorf("could not write trace stop event: %s", err) + logp.L().Info("Shutting down, waiting for output to complete") + + // Due to defer's LIFO execution order, waitPublished.Wait() has to be + // located _after_ b.Manager.Stop() or else it will exit early + waitPublished := monitors.NewSignalWait() + defer waitPublished.Wait() + + // Three possible events: global beat, run_once pipeline done and publish timeout + waitPublished.AddChan(bt.done) + waitPublished.Add(monitors.WithLog(pipelineWrapper.Wait, "shutdown: finished publishing events.")) + if bt.config.PublishTimeout > 0 { + logp.Info("shutdown: output timer started. Waiting for max %v.", bt.config.PublishTimeout) + waitPublished.Add(monitors.WithLog(monitors.WaitDuration(bt.config.PublishTimeout), + "shutdown: timed out waiting for pipeline to publish events.")) } - logp.L().Info("Shutting down.") + return nil } // RunStaticMonitors runs the `heartbeat.monitors` portion of the yaml config if present. -func (bt *Heartbeat) RunStaticMonitors(b *beat.Beat) (stop func(), err error) { +func (bt *Heartbeat) RunStaticMonitors(b *beat.Beat, pipeline beat.Pipeline) (stop func(), err error) { runners := make([]cfgfile.Runner, 0, len(bt.config.Monitors)) for _, cfg := range bt.config.Monitors { - created, err := bt.monitorFactory.Create(b.Publisher, cfg) + created, err := bt.monitorFactory.Create(pipeline, cfg) if err != nil { if errors.Is(err, monitors.ErrMonitorDisabled) { logp.L().Info("skipping disabled monitor: %s", err) diff --git a/heartbeat/config/config.go b/heartbeat/config/config.go index 4aaccabd1e4..8a396d46fe8 100644 --- a/heartbeat/config/config.go +++ b/heartbeat/config/config.go @@ -41,6 +41,7 @@ type LocationWithID struct { // Config defines the structure of heartbeat.yml. type Config struct { RunOnce bool `config:"run_once"` + PublishTimeout time.Duration `config:"publish_timeout"` Monitors []*conf.C `config:"monitors"` ConfigMonitors *conf.C `config:"config.monitors"` Scheduler Scheduler `config:"scheduler"` diff --git a/heartbeat/docs/heartbeat-options.asciidoc b/heartbeat/docs/heartbeat-options.asciidoc index 3db44656302..65ca08a938f 100644 --- a/heartbeat/docs/heartbeat-options.asciidoc +++ b/heartbeat/docs/heartbeat-options.asciidoc @@ -133,3 +133,21 @@ heartbeat.run_once: true heartbeat.monitors: # your monitor config here... ---------------------------------------------------------------------- + +[float] +[[publish-timeout]] +=== Publish timeout (Experimental) + +You can configure {beatname_uc} to exit after an elapsed timeout if unable to publish pending events. +This is an experimental feature and is subject to change. + +Note, the `heartbeat.run_once` flag is required for `publish_timeout` to take effect. 
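For context on how `run_once` and `publish_timeout` interact at shutdown: the beater changes above register several wake-up conditions (beat shutdown, pipeline drained, timeout elapsed) and exit on whichever fires first. Below is a minimal, stdlib-only sketch of that sequencing; `pendingACKs` and the 30s value are illustrative stand-ins, not the actual beater symbols.

```go
// A self-contained sketch of the shutdown sequencing that publish_timeout
// introduces: after run_once finishes, wait for either (a) every published
// event to be ACKed or (b) the configured timeout, whichever comes first.
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	publishTimeout := 30 * time.Second // heartbeat.publish_timeout: 30s

	var pendingACKs sync.WaitGroup
	pendingACKs.Add(3) // pretend three events were published

	// Simulated output ACKing the events some time later.
	go func() {
		time.Sleep(10 * time.Millisecond)
		for i := 0; i < 3; i++ {
			pendingACKs.Done()
		}
	}()

	acked := make(chan struct{})
	go func() {
		pendingACKs.Wait()
		close(acked)
	}()

	select {
	case <-acked:
		fmt.Println("shutdown: finished publishing events")
	case <-time.After(publishTimeout):
		fmt.Println("shutdown: timed out waiting for pipeline to publish events")
	}
}
```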
+ +[source,yaml] +---------------------------------------------------------------------- +# heartbeat.yml +heartbeat.publish_timeout: 30s +heartbeat.run_once: true +heartbeat.monitors: +# your monitor config here... +---------------------------------------------------------------------- diff --git a/heartbeat/docs/troubleshooting.asciidoc b/heartbeat/docs/troubleshooting.asciidoc index 12c429715df..ebed4a6863f 100644 --- a/heartbeat/docs/troubleshooting.asciidoc +++ b/heartbeat/docs/troubleshooting.asciidoc @@ -8,6 +8,7 @@ following tips: * <> * <> +* <> * <> //sets block macro for getting-help.asciidoc included in next section @@ -26,5 +27,14 @@ include::{libbeat-dir}/getting-help.asciidoc[] include::{libbeat-dir}/debugging.asciidoc[] +//sets block macro for metrics-in-logs.asciidoc included in next section +[id="understand-{beatname_lc}-logs"] +[role="xpack"] +== Understand metrics in {beatname_uc} logs +++++ +Understand logged metrics +++++ + +include::{libbeat-dir}/metrics-in-logs.asciidoc[] diff --git a/heartbeat/heartbeat.reference.yml b/heartbeat/heartbeat.reference.yml index 1ab668e2bd4..b8281727026 100644 --- a/heartbeat/heartbeat.reference.yml +++ b/heartbeat/heartbeat.reference.yml @@ -10,7 +10,7 @@ ############################# Heartbeat ###################################### -# Define a directory to load monitor definitions from. Definitions take the form +# Define a directory from which to load monitor definitions. Definitions take the form # of individual yaml files. heartbeat.config.monitors: # Directory + glob pattern to search for configuration files @@ -25,7 +25,7 @@ heartbeat.monitors: - type: icmp # monitor type `icmp` (requires root) uses ICMP Echo Request to ping # configured hosts - # ID used to uniquely identify this monitor in elasticsearch even if the config changes + # ID used to uniquely identify this monitor in Elasticsearch even if the config changes id: my-monitor # Human readable display name for this service in Uptime UI and elsewhere @@ -43,7 +43,7 @@ heartbeat.monitors: # List of hosts to ping hosts: ["localhost"] - # Configure IP protocol types to ping on if hostnames are configured. + # Configure IP protocol types to ping if hostnames are configured. # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. ipv4: true ipv6: true @@ -55,7 +55,7 @@ heartbeat.monitors: # Waiting duration until another ICMP Echo Request is emitted. wait: 1s - # The tags of the monitors are included in their own field with each + # The tags of the monitors are included in their field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] @@ -81,9 +81,9 @@ heartbeat.monitors: # How often to check for changes #reload.period: 1s -- type: tcp # monitor type `tcp`. Connect via TCP and optionally verify endpoint +- type: tcp # monitor type `tcp`. Connect via TCP and optionally verify the endpoint # by sending/receiving a custom payload - # ID used to uniquely identify this monitor in elasticsearch even if the config changes + # ID used to uniquely identify this monitor in Elasticsearch even if the config changes id: my-monitor # Human readable display name for this service in Uptime UI and elsewhere @@ -97,23 +97,23 @@ heartbeat.monitors: # configure hosts to ping. # Entries can be: - # - plain host name or IP like `localhost`: + # - plain hostname or IP like `localhost`: # Requires ports configs to be checked. 
If ssl is configured, - # a SSL/TLS based connection will be established. Otherwise plain tcp connection + # an SSL/TLS based connection will be established. Otherwise plain tcp connection # will be established # - hostname + port like `localhost:12345`: - # Connect to port on given host. If ssl is configured, - # a SSL/TLS based connection will be established. Otherwise plain tcp connection + # Connect to port on a given host. If ssl is configured, + # an SSL/TLS based connection will be established. Otherwise plain tcp connection # will be established # - full url syntax. `scheme://:[port]`. The `` can be one of - # `tcp`, `plain`, `ssl` and `tls`. If `tcp`, `plain` is configured, a plain + # `tcp`, `plain`, `ssl`, and `tls`. If `tcp`, `plain` is configured, a plain # tcp connection will be established, even if ssl is configured. # Using `tls`/`ssl`, an SSL connection is established. If no ssl is configured, # system defaults will be used (not supported on windows). - # If `port` is missing in url, the ports setting is required. + # If `port` is missing in url, the port setting is required. hosts: ["localhost:9200"] - # Configure IP protocol types to ping on if hostnames are configured. + # Configure IP protocol types to ping if hostnames are configured. # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. ipv4: true ipv6: true @@ -126,10 +126,10 @@ heartbeat.monitors: #timeout: 16s # Optional payload string to send to remote and expected answer. If none is - # configured, the endpoint is expected to be up if connection attempt was + # configured, the endpoint is expected to be up if a connection attempt was # successful. If only `send_string` is configured, any response will be # accepted as ok. If only `receive_string` is configured, no payload will be - # send, but client expects to receive expected payload on connect. + # send, but the client expects to receive the expected payload on connect. #check: #send: '' #receive: '' @@ -159,8 +159,8 @@ heartbeat.monitors: # Set to true to publish fields with null values in events. #keep_null: false -- type: http # monitor type `http`. Connect via HTTP an optionally verify response - # ID used to uniquely identify this monitor in elasticsearch even if the config changes +- type: http # monitor type `http`. Connect via HTTP and optionally verify the response + # ID used to uniquely identify this monitor in Elasticsearch even if the config changes. id: my-http-monitor # Human readable display name for this service in Uptime UI and elsewhere @@ -170,12 +170,12 @@ heartbeat.monitors: #enabled: true # Configure task schedule - schedule: '@every 5s' # every 5 seconds from start of beat + schedule: '@every 5s' # every 5 seconds from the start of beat # Configure URLs to ping urls: ["http://localhost:9200"] - # Configure IP protocol types to ping on if hostnames are configured. + # Configure IP protocol types to ping if hostnames are configured. # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`. ipv4: true ipv6: true @@ -191,7 +191,7 @@ heartbeat.monitors: #username: '' #password: '' - # TLS/SSL connection settings for use with HTTPS endpoint. If not configured + # TLS/SSL connection settings for use with HTTPS endpoint. If not configured, # system defaults will be used. #ssl: # Certificate Authorities @@ -202,7 +202,7 @@ heartbeat.monitors: # Request settings: #check.request: - # Configure HTTP method to use. Only 'HEAD', 'GET' and 'POST' methods are allowed. + # Configure HTTP method to use. 
Only 'HEAD', 'GET', and 'POST' methods are allowed. #method: "GET" # Dictionary of additional HTTP headers to send: @@ -247,17 +247,17 @@ heartbeat.monitors: #keep_null: false heartbeat.scheduler: - # Limit number of concurrent tasks executed by heartbeat. The task limit if + # Limit the number of concurrent tasks executed by heartbeat. The task limit is # disabled if set to 0. The default is 0. #limit: 0 - # Set the scheduler it's time zone + # Set the scheduler's time zone #location: '' heartbeat.jobs: # Limit the number of concurrent monitors executed by heartbeat. This differs from # heartbeat.scheduler.limit in that it maps to individual monitors rather than the - # subtasks of monitors. For non-browser monitors a subtask usually corresponds to a + # subtasks of monitors. For non-browser monitors, a subtask usually corresponds to a # single file descriptor. # This feature is most useful for the browser type #browser.limit: 1 @@ -268,10 +268,10 @@ heartbeat.jobs: # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. +# If this option is not defined, the hostname is used. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] @@ -283,7 +283,7 @@ heartbeat.jobs: # env: staging # If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields +# fields in the output document instead of being grouped under a "fields" # sub-dictionary. Default is false. #fields_under_root: false @@ -295,7 +295,7 @@ heartbeat.jobs: #queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs - # bulk_max_size) to the output, the moment the output is ready to server + # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events. #mem: # Max number of events the queue can buffer. @@ -347,7 +347,7 @@ heartbeat.jobs: # length of its retry interval each time, up to this maximum. #max_retry_interval: 30s -# Sets the maximum number of CPUs that can be executing simultaneously. The +# Sets the maximum number of CPUs that can be used simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: @@ -468,7 +468,7 @@ heartbeat.jobs: # ignore_missing: false # fail_on_error: true # -# The following example copies the value of message to message_copied +# The following example copies the value of the `message` field to `message_copied` # #processors: # - copy_fields: @@ -478,7 +478,7 @@ heartbeat.jobs: # fail_on_error: true # ignore_missing: false # -# The following example truncates the value of message to 1024 bytes +# The following example truncates the value of the `message` field to 1024 bytes # #processors: # - truncate_fields: @@ -575,7 +575,7 @@ output.elasticsearch: # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. #index: "heartbeat-%{[agent.version]}" - # Optional ingest pipeline. By default no pipeline will be used. + # Optional ingest pipeline. By default, no pipeline will be used.
#pipeline: "" # Optional HTTP path @@ -1306,14 +1306,14 @@ output.elasticsearch: # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag or the `setup` command. +# options here or by using the `-setup` CLI flag or the `setup` command. #setup.dashboards.enabled: false # The directory from where to read the dashboards. The default is the `kibana` # folder in the home path. #setup.dashboards.directory: ${path.home}/kibana -# The URL from where to download the dashboards archive. It is used instead of +# The URL from where to download the dashboard archive. It is used instead of # the directory if it has a value. #setup.dashboards.url: @@ -1410,7 +1410,7 @@ setup.template.settings: # Configure index lifecycle management (ILM) to manage the backing indices # of your data streams. -# Enable ILM support. Valid values are true, false. +# Enable ILM support. Valid values are true or false. #setup.ilm.enabled: true # Set the lifecycle policy name. The default policy name is @@ -1565,25 +1565,25 @@ logging.files: # The name of the files where the logs are written to. #name: heartbeat - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. #rotateeverybytes: 10485760 # = 10MB - # Number of rotated log files to keep. Oldest files will be deleted first. + # Number of rotated log files to keep. The oldest files will be deleted first. #keepfiles: 7 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. #permissions: 0600 - # Enable log file rotation on time intervals in addition to size-based rotation. + # Enable log file rotation on time intervals in addition to the size-based rotation. # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h # are boundary-aligned with minutes, hours, days, weeks, months, and years as # reported by the local system clock. All other intervals are calculated from the # Unix epoch. Defaults to disabled. #interval: 0 - # Rotate existing logs on startup rather than appending to the existing + # Rotate existing logs on startup rather than appending them to the existing # file. Defaults to true. # rotateonstartup: true @@ -1611,7 +1611,7 @@ logging.files: # Array of hosts to connect to. # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # In case you specify an additional path, the scheme is required: http://localhost:9200/path # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 #hosts: ["localhost:9200"] @@ -1658,7 +1658,7 @@ logging.files: # Elasticsearch after a network error. The default is 60s. #backoff.max: 60s - # Configure HTTP request timeout before failing an request to Elasticsearch. + # Configure HTTP request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -1755,15 +1755,15 @@ logging.files: # =============================== HTTP Endpoint ================================ -# Each beat can expose internal metrics through a HTTP endpoint. For security +# Each beat can expose internal metrics through an HTTP endpoint.
For security # reasons the endpoint is disabled by default. This feature is currently experimental. -# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output # append ?pretty to the URL. # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe. # When using IP addresses, it is recommended to only use localhost. #http.host: localhost @@ -1773,7 +1773,7 @@ logging.files: # Define which user should be owning the named pipe. #http.named_pipe.user: -# Define which the permissions that should be applied to the named pipe, use the Security +# Define which permissions should be applied to the named pipe, use the Security # Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with # `http.user`. #http.named_pipe.security_descriptor: diff --git a/heartbeat/heartbeat.yml b/heartbeat/heartbeat.yml index e76be2b3c8a..76876b9c7ff 100644 --- a/heartbeat/heartbeat.yml +++ b/heartbeat/heartbeat.yml @@ -9,7 +9,7 @@ ############################# Heartbeat ###################################### -# Define a directory to load monitor definitions from. Definitions take the form +# Define a directory from which to load monitor definitions. Definitions take the form # of individual yaml files. heartbeat.config.monitors: # Directory + glob pattern to search for configuration files @@ -22,13 +22,13 @@ heartbeat.config.monitors: # Configure monitors inline heartbeat.monitors: - type: http - # Set enabled to true (or delete the following line) to enable this example monitor + # Set enabled to true (or delete the following line) to enable this monitor enabled: false - # ID used to uniquely identify this monitor in elasticsearch even if the config changes + # ID used to uniquely identify this monitor in Elasticsearch even if the config changes id: my-monitor # Human readable display name for this service in Uptime UI and elsewhere name: My Monitor - # List or urls to query + # List of URLs to query urls: ["http://localhost:9200"] # Configure task schedule schedule: '@every 10s' @@ -53,7 +53,7 @@ setup.template.settings: # all the transactions sent by a single shipper in the web interface. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their field with each # transaction published. #tags: ["service-X", "web-tier"] @@ -144,7 +144,7 @@ processors: #logging.level: debug # At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", +# To enable all selectors, use ["*"]. Examples of other selectors are "beat", # "publisher", "service". #logging.selectors: ["*"] @@ -162,7 +162,7 @@ processors: #monitoring.cluster_uuid: # Uncomment to send the metrics to Elasticsearch. Most settings from the -# Elasticsearch output are accepted here as well. +# Elasticsearch outputs are accepted here as well. # Note that the settings should point to your Elasticsearch *monitoring* cluster. 
# Any setting that is not set is automatically inherited from the Elasticsearch # output configuration, so if you have the Elasticsearch output configured such diff --git a/heartbeat/monitors/factory.go b/heartbeat/monitors/factory.go index 0ba9cbb04d4..9e6d4449d31 100644 --- a/heartbeat/monitors/factory.go +++ b/heartbeat/monitors/factory.go @@ -38,7 +38,6 @@ import ( "github.com/elastic/beats/v7/libbeat/processors/add_data_stream" "github.com/elastic/beats/v7/libbeat/processors/add_formatted_index" "github.com/elastic/beats/v7/libbeat/processors/util" - "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/elastic/beats/v7/libbeat/publisher/pipetool" ) @@ -56,7 +55,7 @@ type RunnerFactory struct { beatLocation *config.LocationWithID } -type PipelineClientFactory func(pipeline beat.Pipeline) (pipeline.ISyncClient, error) +type PipelineClientFactory func(pipeline beat.Pipeline) (beat.Client, error) type publishSettings struct { // Fields and tags to add to monitor. diff --git a/heartbeat/monitors/factory_test.go b/heartbeat/monitors/factory_test.go index bce72f63ccb..e32ac671c8e 100644 --- a/heartbeat/monitors/factory_test.go +++ b/heartbeat/monitors/factory_test.go @@ -325,8 +325,11 @@ func TestDuplicateMonitorIDs(t *testing.T) { } } + c, err := mockPipeline.Connect() + require.NoError(t, err) + // Ensure that an error is returned on a bad config - _, m0Err := newMonitor(badConf, reg, mockPipeline.ConnectSync(), sched.Add, nil, nil) + _, m0Err := newMonitor(badConf, reg, c, sched.Add, nil, nil) require.Error(t, m0Err) // Would fail if the previous newMonitor didn't free the monitor.id diff --git a/heartbeat/monitors/mocks.go b/heartbeat/monitors/mocks.go index bc6695f3be9..9a5ba6a3b3a 100644 --- a/heartbeat/monitors/mocks.go +++ b/heartbeat/monitors/mocks.go @@ -43,7 +43,6 @@ import ( "github.com/elastic/beats/v7/heartbeat/scheduler" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common/atomic" - "github.com/elastic/beats/v7/libbeat/publisher/pipeline" beatversion "github.com/elastic/beats/v7/libbeat/version" ) @@ -80,9 +79,8 @@ func makeMockFactory(pluginsReg *plugin.PluginsReg) (factory *RunnerFactory, sch AddTask: sched.Add, StateLoader: monitorstate.NilStateLoader, PluginsReg: pluginsReg, - PipelineClientFactory: func(pipeline beat.Pipeline) (pipeline.ISyncClient, error) { - c, _ := pipeline.Connect() - return SyncPipelineClientAdaptor{C: c}, nil + PipelineClientFactory: func(pipeline beat.Pipeline) (beat.Client, error) { + return pipeline.Connect() }, }), sched, @@ -164,12 +162,6 @@ func (pc *MockPipeline) ConnectWith(cc beat.ClientConfig) (beat.Client, error) { return c, nil } -// Convenience function for tests -func (pc *MockPipeline) ConnectSync() pipeline.ISyncClient { - c, _ := pc.Connect() - return SyncPipelineClientAdaptor{C: c} -} - func (pc *MockPipeline) PublishedEvents() []*beat.Event { pc.mtx.Lock() defer pc.mtx.Unlock() diff --git a/heartbeat/monitors/monitor.go b/heartbeat/monitors/monitor.go index 17271086d16..3d9823ea88f 100644 --- a/heartbeat/monitors/monitor.go +++ b/heartbeat/monitors/monitor.go @@ -22,7 +22,6 @@ import ( "sync" "github.com/elastic/beats/v7/heartbeat/monitors/wrappers/monitorstate" - "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/mitchellh/hashstructure" @@ -63,9 +62,9 @@ type Monitor struct { internalsMtx sync.Mutex close func() error - // pubClient accepts an ISyncClient as the lowest common denominator of client - // since async clients are a subset of sync clients - pubClient 
pipeline.ISyncClient + // pubClient accepts a generic beat.Client. Pipeline synchronicity is implemented + // at client wrapper-level + pubClient beat.Client // stats is the countersRecorder used to record lifecycle events // for global metrics + telemetry @@ -89,7 +88,7 @@ func checkMonitorConfig(config *conf.C, registrar *plugin.PluginsReg) error { func newMonitor( config *conf.C, registrar *plugin.PluginsReg, - pubClient pipeline.ISyncClient, + pubClient beat.Client, taskAdder scheduler.AddTask, stateLoader monitorstate.StateLoader, onStop func(*Monitor), @@ -106,7 +105,7 @@ func newMonitor( func newMonitorUnsafe( config *conf.C, registrar *plugin.PluginsReg, - pubClient pipeline.ISyncClient, + pubClient beat.Client, addTask scheduler.AddTask, stateLoader monitorstate.StateLoader, onStop func(*Monitor), diff --git a/heartbeat/monitors/monitor_test.go b/heartbeat/monitors/monitor_test.go index 560e46fcdea..3176d27a2fa 100644 --- a/heartbeat/monitors/monitor_test.go +++ b/heartbeat/monitors/monitor_test.go @@ -69,7 +69,9 @@ func testMonitorConfig(t *testing.T, conf *conf.C, eventValidator validator.Vali sched := scheduler.Create(1, monitoring.NewRegistry(), time.Local, nil, false) defer sched.Stop() - mon, err := newMonitor(conf, reg, pipel.ConnectSync(), sched.Add, nil, nil) + c, err := pipel.Connect() + require.NoError(t, err) + mon, err := newMonitor(conf, reg, c, sched.Add, nil, nil) require.NoError(t, err) mon.Start() @@ -116,7 +118,9 @@ func TestCheckInvalidConfig(t *testing.T) { sched := scheduler.Create(1, monitoring.NewRegistry(), time.Local, nil, false) defer sched.Stop() - m, err := newMonitor(serverMonConf, reg, pipel.ConnectSync(), sched.Add, nil, nil) + c, err := pipel.Connect() + require.NoError(t, err) + m, err := newMonitor(serverMonConf, reg, c, sched.Add, nil, nil) require.Error(t, err) // This could change if we decide the contract for newMonitor should always return a monitor require.Nil(t, m, "For this test to work we need a nil value for the monitor.") diff --git a/heartbeat/monitors/pipeline.go b/heartbeat/monitors/pipeline.go new file mode 100644 index 00000000000..cde1f8b9256 --- /dev/null +++ b/heartbeat/monitors/pipeline.go @@ -0,0 +1,91 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package monitors + +import ( + "sync" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/common/acker" + "github.com/elastic/beats/v7/libbeat/publisher/pipetool" + "github.com/elastic/elastic-agent-libs/logp" +) + +// Defines a synchronous pipeline wrapper interface +type PipelineWrapper interface { + Wait() +} + +type NoopPipelineWrapper struct { +} + +// Noop +func (n *NoopPipelineWrapper) Wait() { +} + +// Pipeline wrapper that implements synchronous op. 
Calling Wait() on this client will block until all +// events passed through this pipeline (and any of the linked clients) are ACKed, safe to use concurrently. +type SyncPipelineWrapper struct { + wg sync.WaitGroup +} + +// Used to wrap every client and track emitted vs acked events. +type wrappedClient struct { + wg *sync.WaitGroup + client beat.Client +} + +// returns a new pipeline with the provided SyncPipelineClientWrapper. +func WithSyncPipelineWrapper(pipeline beat.Pipeline, pw *SyncPipelineWrapper) beat.Pipeline { + pipeline = pipetool.WithACKer(pipeline, acker.TrackingCounter(func(_, total int) { + logp.L().Debugf("ack callback receives with events count of %d", total) + pw.onACK(total) + })) + + pipeline = pipetool.WithClientWrapper(pipeline, func(client beat.Client) beat.Client { + return &wrappedClient{ + wg: &pw.wg, + client: client, + } + }) + + return pipeline +} + +func (c *wrappedClient) Publish(event beat.Event) { + c.wg.Add(1) + c.client.Publish(event) +} + +func (c *wrappedClient) PublishAll(events []beat.Event) { + c.wg.Add(len(events)) + c.client.PublishAll(events) +} + +func (c *wrappedClient) Close() error { + return c.client.Close() +} + +// waits until ACK is received for every event that was sent +func (s *SyncPipelineWrapper) Wait() { + s.wg.Wait() +} + +func (s *SyncPipelineWrapper) onACK(n int) { + s.wg.Add(-1 * n) +} diff --git a/heartbeat/monitors/pipeline_test.go b/heartbeat/monitors/pipeline_test.go new file mode 100644 index 00000000000..2a7e6a24457 --- /dev/null +++ b/heartbeat/monitors/pipeline_test.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
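The wrapper above is the heart of the run_once drain: every `Publish` increments a shared WaitGroup, the pipeline-level ACKer counts it back down, and `Wait()` blocks until the two balance out. A hedged usage sketch under those assumptions follows; `publishAndDrain` is an illustrative name, the real call sites are the beater's `Run()` and the monitor factory.

```go
// A usage sketch (not the exact beater code) for the wrapper introduced in
// pipeline.go: wrap the publisher once, let clients connect through the
// wrapped pipeline, then block on Wait() until every event has been ACKed.
package example

import (
	"github.com/elastic/beats/v7/heartbeat/monitors"
	"github.com/elastic/beats/v7/libbeat/beat"
)

func publishAndDrain(publisher beat.Pipeline, events []beat.Event) error {
	sync := &monitors.SyncPipelineWrapper{}
	pipeline := monitors.WithSyncPipelineWrapper(publisher, sync)

	client, err := pipeline.Connect()
	if err != nil {
		return err
	}
	// Each publish bumps the wrapper's WaitGroup; the installed ACKer
	// counts it back down as the output confirms delivery.
	client.PublishAll(events)
	if err := client.Close(); err != nil {
		return err
	}

	sync.Wait() // returns once all events above are ACKed
	return nil
}
```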
+ +package monitors + +import ( + "context" + "testing" + "time" + + "github.com/elastic/beats/v7/heartbeat/eventext" + "github.com/elastic/beats/v7/heartbeat/monitors/jobs" + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/elastic-agent-libs/mapstr" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSyncPipelineWrapper(t *testing.T) { + defineJob := func(fields mapstr.M) func(event *beat.Event) (j []jobs.Job, e error) { + return func(event *beat.Event) (j []jobs.Job, e error) { + eventext.MergeEventFields(event, fields) + return nil, nil + } + } + simpleJob := defineJob(mapstr.M{"foo": "bar"}) + + testCases := []struct { + name string + job jobs.Job + acks int + }{ + { + "simple", + simpleJob, + 1, + }, + { + "one cont", + func(event *beat.Event) (j []jobs.Job, e error) { + _, _ = simpleJob(event) + return []jobs.Job{simpleJob}, nil + }, + 2, + }, + { + "multiple conts", + func(event *beat.Event) (j []jobs.Job, e error) { + _, _ = simpleJob(event) + return []jobs.Job{ + defineJob(mapstr.M{"baz": "bot"}), + defineJob(mapstr.M{"blah": "blargh"}), + }, nil + }, + 3, + }, + { + "cancelled cont", + func(event *beat.Event) (j []jobs.Job, e error) { + eventext.CancelEvent(event) + return []jobs.Job{simpleJob}, nil + }, + 1, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + done := make(chan bool) + pipel := &MockPipeline{} + sync := &SyncPipelineWrapper{} + wrapped := WithSyncPipelineWrapper(pipel, sync) + + client, err := wrapped.Connect() + require.NoError(t, err) + queue := runPublishJob(tc.job, client) + for { + if len(queue) == 0 { + break + } + tf := queue[0] + queue = queue[1:] + conts := tf(context.Background()) + queue = append(queue, conts...) + } + err = client.Close() + require.NoError(t, err) + + go func() { + sync.Wait() + done <- true + }() + + wait := time.After(1000 * time.Millisecond) + select { + case <-done: + assert.Fail(t, "pipeline exited before events were published") + case <-wait: + } + + sync.onACK(tc.acks) + + wait = time.After(1000 * time.Millisecond) + select { + case <-done: + case <-wait: + assert.Fail(t, "pipeline exceeded timeout after every event acked") + } + }) + } +} diff --git a/heartbeat/monitors/signalwait.go b/heartbeat/monitors/signalwait.go new file mode 100644 index 00000000000..5832ac9a3af --- /dev/null +++ b/heartbeat/monitors/signalwait.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package monitors + +import ( + "time" + + "github.com/elastic/elastic-agent-libs/logp" +) + +type signalWait struct { + count int // number of potential 'alive' signals + signals chan struct{} +} + +type signaler func() + +func NewSignalWait() *signalWait { + return &signalWait{ + signals: make(chan struct{}, 1), + } +} + +func (s *signalWait) Wait() { + if s.count == 0 { + return + } + + <-s.signals + s.count-- +} + +func (s *signalWait) Add(fn signaler) { + s.count++ + go func() { + fn() + var v struct{} + s.signals <- v + }() +} + +func (s *signalWait) AddChan(c <-chan struct{}) { + s.Add(WaitChannel(c)) +} + +func (s *signalWait) AddTimer(t *time.Timer) { + s.Add(WaitTimer(t)) +} + +func (s *signalWait) AddTimeout(d time.Duration) { + s.Add(WaitDuration(d)) +} + +func (s *signalWait) Signal() { + s.Add(func() {}) +} + +func WaitChannel(c <-chan struct{}) signaler { + return func() { <-c } +} + +func WaitTimer(t *time.Timer) signaler { + return func() { <-t.C } +} + +func WaitDuration(d time.Duration) signaler { + return WaitTimer(time.NewTimer(d)) +} + +func WithLog(s signaler, msg string) signaler { + return func() { + s() + logp.L().Infof("%v", msg) + } +} diff --git a/heartbeat/monitors/signalwait_test.go b/heartbeat/monitors/signalwait_test.go new file mode 100644 index 00000000000..4e3ecd7b5d6 --- /dev/null +++ b/heartbeat/monitors/signalwait_test.go @@ -0,0 +1,235 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
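One detail of `signalWait` worth calling out: `Wait()` consumes a single signal, so it acts as a first-of-N gate rather than a barrier, and `Run()` relies on that twice (once for monitor completion, once for the publish drain). A short sketch of the composition; the function names and the 30s timeout are assumed for illustration.

```go
// Sketch: composing the signal wait as Run() does. Wait() returns as soon
// as the first registered signal fires, not after all of them.
package example

import (
	"time"

	"github.com/elastic/beats/v7/heartbeat/monitors"
)

func waitForFirstSignal(done <-chan struct{}, runOnceDone func()) {
	w := monitors.NewSignalWait()
	w.AddChan(done)                                              // beat shutdown requested
	w.Add(monitors.WithLog(runOnceDone, "Ending run_once run.")) // scheduler drained
	w.AddTimeout(30 * time.Second)                               // hard deadline (assumed)
	w.Wait()                                                     // first signal wins
}
```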
+ +package monitors + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestSimpleWait(t *testing.T) { + tests := map[string]struct { + number int + }{ + "one wait signals": { + number: 1, + }, + "50 wait signals": { + number: 50, + }, + "100 wait signals": { + number: 100, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + done := make(chan bool) + + gl := sync.WaitGroup{} + gl.Add(1) // Lock routines indefinitely + + addFn := func(s *signalWait) { + s.Add(func() { + gl.Wait() + done <- true // Just in case, shared channel + }) + } + + signalWait := NewSignalWait() + + for i := 0; i < tt.number; i++ { + addFn(signalWait) + } + + go func() { + signalWait.Wait() + done <- true + }() + + wait := time.After(500 * time.Millisecond) + select { + case <-done: + assert.Fail(t, "found early exit signal") + case <-wait: + } + + signalWait.Add(func() {}) + + wait = time.After(500 * time.Millisecond) + select { + case <-done: + case <-wait: + assert.Fail(t, "signal did not exit on time") + } + }) + } +} + +func TestChannelWait(t *testing.T) { + tests := map[string]struct { + number int + }{ + "one wait signals": { + number: 1, + }, + "50 wait signals": { + number: 50, + }, + "100 wait signals": { + number: 100, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + done := make(chan bool) + gl := make(chan struct{}) + signalWait := NewSignalWait() + + for i := 0; i < tt.number; i++ { + signalWait.AddChan(gl) + } + + go func() { + signalWait.Wait() + done <- true + }() + + wait := time.After(500 * time.Millisecond) + select { + case <-done: + assert.Fail(t, "found early exit signal") + case <-wait: + } + + d := make(chan struct{}) + signalWait.AddChan(d) + d <- struct{}{} + + wait = time.After(500 * time.Millisecond) + select { + case <-done: + case <-wait: + assert.Fail(t, "signal did not exit on time") + } + }) + } +} + +func TestTimeoutWait(t *testing.T) { + tests := map[string]struct { + number int + }{ + "one wait signals": { + number: 1, + }, + "50 wait signals": { + number: 50, + }, + "100 wait signals": { + number: 100, + }, + } + + for name, tt := range tests { + + t.Run(name, func(t *testing.T) { + done := make(chan bool) + + signalWait := NewSignalWait() + + for i := 0; i < tt.number; i++ { + signalWait.AddTimer(time.NewTimer(time.Hour)) + } + + go func() { + signalWait.Wait() + done <- true + }() + + wait := time.After(500 * time.Millisecond) + select { + case <-done: + assert.Fail(t, "found early exit signal") + case <-wait: + } + + signalWait.AddTimer(time.NewTimer(time.Microsecond)) + + wait = time.After(500 * time.Millisecond) + select { + case <-done: + case <-wait: + assert.Fail(t, "signal did not exit on time") + } + }) + } +} +func TestDurationWait(t *testing.T) { + tests := map[string]struct { + number int + }{ + "one wait signals": { + number: 1, + }, + "50 wait signals": { + number: 50, + }, + "100 wait signals": { + number: 100, + }, + } + + for name, tt := range tests { + + t.Run(name, func(t *testing.T) { + done := make(chan bool) + + signalWait := NewSignalWait() + + for i := 0; i < tt.number; i++ { + signalWait.AddTimeout(time.Hour) + } + + go func() { + signalWait.Wait() + done <- true + }() + + wait := time.After(500 * time.Millisecond) + select { + case <-done: + assert.Fail(t, "found early exit signal") + case <-wait: + } + + signalWait.AddTimeout(time.Microsecond) + + wait = time.After(500 * time.Millisecond) + select { + case <-done: + case <-wait: + assert.Fail(t, "signal 
did not exit on time") + } + }) + } +} diff --git a/heartbeat/monitors/task.go b/heartbeat/monitors/task.go index 35563d7cd36..038a6c5baa2 100644 --- a/heartbeat/monitors/task.go +++ b/heartbeat/monitors/task.go @@ -21,8 +21,6 @@ import ( "context" "fmt" - "github.com/elastic/beats/v7/libbeat/publisher/pipeline" - "github.com/elastic/beats/v7/heartbeat/eventext" "github.com/elastic/beats/v7/heartbeat/monitors/jobs" "github.com/elastic/beats/v7/heartbeat/scheduler" @@ -39,7 +37,7 @@ type configuredJob struct { config jobConfig monitor *Monitor cancelFn context.CancelFunc - pubClient pipeline.ISyncClient + pubClient beat.Client } func newConfiguredJob(job jobs.Job, config jobConfig, monitor *Monitor) *configuredJob { @@ -76,7 +74,7 @@ func (t *configuredJob) makeSchedulerTaskFunc() scheduler.TaskFunc { } // Start schedules this configuredJob for execution. -func (t *configuredJob) Start(pubClient pipeline.ISyncClient) { +func (t *configuredJob) Start(pubClient beat.Client) { var err error t.pubClient = pubClient @@ -86,8 +84,8 @@ func (t *configuredJob) Start(pubClient pipeline.ISyncClient) { return } - tf := t.makeSchedulerTaskFunc() - t.cancelFn, err = t.monitor.addTask(t.config.Schedule, t.monitor.stdFields.ID, tf, t.config.Type, pubClient.Wait) + tf := t.makeSchedulerTaskFunc() //nolint:typecheck // this is used, linter just doesn't seem to see it + t.cancelFn, err = t.monitor.addTask(t.config.Schedule, t.monitor.stdFields.ID, tf, t.config.Type) if err != nil { logp.L().Info("could not start monitor: %v", err) } @@ -103,7 +101,7 @@ func (t *configuredJob) Stop() { } } -func runPublishJob(job jobs.Job, pubClient pipeline.ISyncClient) []scheduler.TaskFunc { +func runPublishJob(job jobs.Job, pubClient beat.Client) []scheduler.TaskFunc { event := &beat.Event{ Fields: mapstr.M{}, } @@ -125,10 +123,10 @@ func runPublishJob(job jobs.Job, pubClient pipeline.ISyncClient) []scheduler.Tas Meta: event.Meta.Clone(), Fields: event.Fields.Clone(), } - _ = pubClient.Publish(clone) + pubClient.Publish(clone) } else { // no clone needed if no continuations - _ = pubClient.Publish(*event) + pubClient.Publish(*event) } } diff --git a/heartbeat/monitors/task_test.go b/heartbeat/monitors/task_test.go index f2fddccc4e6..37a7f421442 100644 --- a/heartbeat/monitors/task_test.go +++ b/heartbeat/monitors/task_test.go @@ -96,7 +96,8 @@ func Test_runPublishJob(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { pipel := &MockPipeline{} - client := pipel.ConnectSync() + client, err := pipel.Connect() + require.NoError(t, err) queue := runPublishJob(tc.job, client) for { if len(queue) == 0 { @@ -107,8 +108,7 @@ func Test_runPublishJob(t *testing.T) { conts := tf(context.Background()) queue = append(queue, conts...) } - client.Wait() - err := client.Close() + err = client.Close() require.NoError(t, err) require.Len(t, pipel.PublishedEvents(), len(tc.validators)) diff --git a/heartbeat/scheduler/scheduler.go b/heartbeat/scheduler/scheduler.go index 87f25de1af5..eb9ba2f463c 100644 --- a/heartbeat/scheduler/scheduler.go +++ b/heartbeat/scheduler/scheduler.go @@ -161,11 +161,11 @@ func (s *Scheduler) WaitForRunOnce() { // has already stopped. 
var ErrAlreadyStopped = errors.New("attempted to add job to already stopped scheduler") -type AddTask func(sched Schedule, id string, entrypoint TaskFunc, jobType string, waitForPublish func()) (removeFn context.CancelFunc, err error) +type AddTask func(sched Schedule, id string, entrypoint TaskFunc, jobType string) (removeFn context.CancelFunc, err error) // Add adds the given TaskFunc to the current scheduler. Will return an error if the scheduler // is done. -func (s *Scheduler) Add(sched Schedule, id string, entrypoint TaskFunc, jobType string, waitForPublish func()) (removeFn context.CancelFunc, err error) { +func (s *Scheduler) Add(sched Schedule, id string, entrypoint TaskFunc, jobType string) (removeFn context.CancelFunc, err error) { if errors.Is(s.ctx.Err(), context.Canceled) { return nil, ErrAlreadyStopped } @@ -193,7 +193,6 @@ func (s *Scheduler) Add(sched Schedule, id string, entrypoint TaskFunc, jobType s.stats.activeJobs.Dec() if s.runOnce { - waitForPublish() s.runOnceWg.Done() } else { // Schedule the next run diff --git a/heartbeat/scheduler/scheduler_test.go b/heartbeat/scheduler/scheduler_test.go index 867a50c2962..a133f8cd8e4 100644 --- a/heartbeat/scheduler/scheduler_test.go +++ b/heartbeat/scheduler/scheduler_test.go @@ -93,7 +93,7 @@ func TestSchedulerRun(t *testing.T) { return nil } return []TaskFunc{cont} - }), "http", nil) + }), "http") require.NoError(t, err) removedEvents := uint32(1) @@ -109,7 +109,7 @@ func TestSchedulerRun(t *testing.T) { } // Attempt to execute this twice to see if remove() had any effect removeMtx.Lock() - remove, err = s.Add(testSchedule{}, "removed", testTaskTimes(removedEvents+1, testFn), "http", nil) + remove, err = s.Add(testSchedule{}, "removed", testTaskTimes(removedEvents+1, testFn), "http") require.NoError(t, err) require.NotNil(t, remove) removeMtx.Unlock() @@ -122,7 +122,7 @@ func TestSchedulerRun(t *testing.T) { return nil } return []TaskFunc{cont} - }), "http", nil) + }), "http") require.NoError(t, err) received := make([]string, 0) @@ -160,7 +160,6 @@ func TestScheduler_WaitForRunOnce(t *testing.T) { defer s.Stop() executed := new(uint32) - waits := new(uint32) _, err := s.Add(testSchedule{0}, "runOnce", func(_ context.Context) []TaskFunc { cont := func(_ context.Context) []TaskFunc { @@ -170,12 +169,11 @@ func TestScheduler_WaitForRunOnce(t *testing.T) { return nil } return []TaskFunc{cont} - }, "http", func() { atomic.AddUint32(waits, 1) }) + }, "http") require.NoError(t, err) s.WaitForRunOnce() require.Equal(t, uint32(1), atomic.LoadUint32(executed)) - require.Equal(t, uint32(1), atomic.LoadUint32(waits)) } func TestScheduler_Stop(t *testing.T) { @@ -188,7 +186,7 @@ func TestScheduler_Stop(t *testing.T) { _, err := s.Add(testSchedule{}, "testPostStop", testTaskTimes(1, func(_ context.Context) []TaskFunc { executed <- struct{}{} return nil - }), "http", nil) + }), "http") assert.Equal(t, ErrAlreadyStopped, err) } @@ -287,7 +285,7 @@ func BenchmarkScheduler(b *testing.B) { _, err := s.Add(sched, "testPostStop", func(_ context.Context) []TaskFunc { executed <- struct{}{} return nil - }, "http", nil) + }, "http") assert.NoError(b, err) } diff --git a/libbeat/_meta/config.yml.tmpl b/libbeat/_meta/config.yml.tmpl index 5422d8a1555..978d3331dff 100644 --- a/libbeat/_meta/config.yml.tmpl +++ b/libbeat/_meta/config.yml.tmpl @@ -5,7 +5,7 @@ # all the transactions sent by a single shipper in the web interface. 
#name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their field with each # transaction published. #tags: ["service-X", "web-tier"] @@ -25,8 +25,8 @@ # options here or by using the `setup` command. #setup.dashboards.enabled: false -# The URL from where to download the dashboards archive. By default this URL -# has a value which is computed based on the Beat name and version. For released +# The URL from where to download the dashboard archive. By default, this URL +# has a value that is computed based on the Beat name and version. For released # versions, this URL points to the dashboard archive on the artifacts.elastic.co # website. #setup.dashboards.url: @@ -125,7 +125,7 @@ processors: #logging.level: debug # At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", +# To enable all selectors, use ["*"]. Examples of other selectors are "beat", # "publisher", "service". #logging.selectors: ["*"] @@ -143,7 +143,7 @@ processors: #monitoring.cluster_uuid: # Uncomment to send the metrics to Elasticsearch. Most settings from the -# Elasticsearch output are accepted here as well. +# Elasticsearch outputs are accepted here as well. # Note that the settings should point to your Elasticsearch *monitoring* cluster. # Any setting that is not set is automatically inherited from the Elasticsearch # output configuration, so if you have the Elasticsearch output configured such diff --git a/libbeat/_meta/config/general.reference.yml.tmpl b/libbeat/_meta/config/general.reference.yml.tmpl index b56988015b9..382c3f13e31 100644 --- a/libbeat/_meta/config/general.reference.yml.tmpl +++ b/libbeat/_meta/config/general.reference.yml.tmpl @@ -2,10 +2,10 @@ # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. +# If this option is not defined, the hostname is used. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] @@ -17,7 +17,7 @@ # env: staging # If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields +# fields in the output document instead of being grouped under a "fields" # sub-dictionary. Default is false. #fields_under_root: false @@ -29,7 +29,7 @@ #queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs - # bulk_max_size) to the output, the moment the output is ready to server + # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events. #mem: # Max number of events the queue can buffer. @@ -81,6 +81,6 @@ # length of its retry interval each time, up to this maximum. #max_retry_interval: 30s -# Sets the maximum number of CPUs that can be executing simultaneously. The +# Sets the maximum number of CPUs that can be used simultaneously. The # default is the number of logical CPUs available in the system.
#max_procs: diff --git a/libbeat/_meta/config/general.yml.tmpl b/libbeat/_meta/config/general.yml.tmpl index 1c85044c1b9..26ef952ee19 100644 --- a/libbeat/_meta/config/general.yml.tmpl +++ b/libbeat/_meta/config/general.yml.tmpl @@ -4,7 +4,7 @@ # all the transactions sent by a single shipper in the web interface. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their field with each # transaction published. #tags: ["service-X", "web-tier"] diff --git a/libbeat/_meta/config/http.reference.yml.tmpl b/libbeat/_meta/config/http.reference.yml.tmpl index 24b09ccd446..35c8e5c2014 100644 --- a/libbeat/_meta/config/http.reference.yml.tmpl +++ b/libbeat/_meta/config/http.reference.yml.tmpl @@ -1,14 +1,14 @@ {{header "HTTP Endpoint"}} -# Each beat can expose internal metrics through a HTTP endpoint. For security +# Each beat can expose internal metrics through an HTTP endpoint. For security # reasons the endpoint is disabled by default. This feature is currently experimental. -# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output # append ?pretty to the URL. # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe. # When using IP addresses, it is recommended to only use localhost. #http.host: localhost @@ -18,7 +18,7 @@ # Define which user should be owning the named pipe. #http.named_pipe.user: -# Define which the permissions that should be applied to the named pipe, use the Security +# Define which permissions should be applied to the named pipe, use the Security # Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with # `http.user`. #http.named_pipe.security_descriptor: diff --git a/libbeat/_meta/config/logging.reference.yml.tmpl b/libbeat/_meta/config/logging.reference.yml.tmpl index f4ca435be01..660bbb73a02 100644 --- a/libbeat/_meta/config/logging.reference.yml.tmpl +++ b/libbeat/_meta/config/logging.reference.yml.tmpl @@ -46,24 +46,24 @@ logging.files: # The name of the files where the logs are written to. #name: {{.BeatName}} - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. #rotateeverybytes: 10485760 # = 10MB - # Number of rotated log files to keep. Oldest files will be deleted first. + # Number of rotated log files to keep. The oldest files will be deleted first. #keepfiles: 7 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. #permissions: 0600 - # Enable log file rotation on time intervals in addition to size-based rotation. + # Enable log file rotation on time intervals in addition to the size-based rotation. # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h # are boundary-aligned with minutes, hours, days, weeks, months, and years as # reported by the local system clock. All other intervals are calculated from the # Unix epoch. Defaults to disabled. 
  #interval: 0

-  # Rotate existing logs on startup rather than appending to the existing
+  # Rotate existing logs on startup rather than appending them to the existing
   # file. Defaults to true.
   # rotateonstartup: true
diff --git a/libbeat/_meta/config/logging.yml.tmpl b/libbeat/_meta/config/logging.yml.tmpl
index 7994c7ce669..00227ad0cdf 100644
--- a/libbeat/_meta/config/logging.yml.tmpl
+++ b/libbeat/_meta/config/logging.yml.tmpl
@@ -5,6 +5,6 @@
 #logging.level: debug

 # At debug level, you can selectively enable logging only for some components.
-# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# To enable all selectors, use ["*"]. Examples of other selectors are "beat",
 # "publisher", "service".
 #logging.selectors: ["*"]
diff --git a/libbeat/_meta/config/monitoring.reference.yml.tmpl b/libbeat/_meta/config/monitoring.reference.yml.tmpl
index c40ad0d0c29..85611c48f0d 100644
--- a/libbeat/_meta/config/monitoring.reference.yml.tmpl
+++ b/libbeat/_meta/config/monitoring.reference.yml.tmpl
@@ -22,7 +22,7 @@
   # Array of hosts to connect to.
   # Scheme and port can be left out and will be set to the default (http and 9200)
-  # In case you specify and additional path, the scheme is required: http://localhost:9200/path
+  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
   # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
   #hosts: ["localhost:9200"]
@@ -69,7 +69,7 @@
   # Elasticsearch after a network error. The default is 60s.
   #backoff.max: 60s

-  # Configure HTTP request timeout before failing an request to Elasticsearch.
+  # Configure HTTP request timeout before failing a request to Elasticsearch.
   #timeout: 90

{{include "ssl.reference.yml.tmpl" . | indent 2 }}
diff --git a/libbeat/_meta/config/monitoring.yml.tmpl b/libbeat/_meta/config/monitoring.yml.tmpl
index 6253cb167d5..3c8fd0ba0de 100644
--- a/libbeat/_meta/config/monitoring.yml.tmpl
+++ b/libbeat/_meta/config/monitoring.yml.tmpl
@@ -12,7 +12,7 @@
 #monitoring.cluster_uuid:

 # Uncomment to send the metrics to Elasticsearch. Most settings from the
 # Elasticsearch output are accepted here as well.
 # Note that the settings should point to your Elasticsearch *monitoring* cluster.
 # Any setting that is not set is automatically inherited from the Elasticsearch
 # output configuration, so if you have the Elasticsearch output configured such
diff --git a/libbeat/_meta/config/output-elasticsearch.reference.yml.tmpl b/libbeat/_meta/config/output-elasticsearch.reference.yml.tmpl
index 5dd3d83a77d..edc40e632d7 100644
--- a/libbeat/_meta/config/output-elasticsearch.reference.yml.tmpl
+++ b/libbeat/_meta/config/output-elasticsearch.reference.yml.tmpl
@@ -42,7 +42,7 @@ output.elasticsearch:
   # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
   #index: "{{.BeatIndexPrefix}}-%{[agent.version]}"

-  # Optional ingest pipeline. By default no pipeline will be used.
+  # Optional ingest pipeline. By default, no pipeline will be used.
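+  #
+  # For example (hypothetical pipeline name), routing all events through a
+  # custom ingest pipeline:
+  #
+  # pipeline: "my-ingest-pipeline"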
#pipeline: "" # Optional HTTP path diff --git a/libbeat/_meta/config/processors.reference.yml.tmpl b/libbeat/_meta/config/processors.reference.yml.tmpl index e6542157757..510bbf29894 100644 --- a/libbeat/_meta/config/processors.reference.yml.tmpl +++ b/libbeat/_meta/config/processors.reference.yml.tmpl @@ -115,7 +115,7 @@ # ignore_missing: false # fail_on_error: true # -# The following example copies the value of message to message_copied +# The following example copies the value of the message to message_copied # #processors: # - copy_fields: @@ -125,7 +125,7 @@ # fail_on_error: true # ignore_missing: false # -# The following example truncates the value of message to 1024 bytes +# The following example truncates the value of the message to 1024 bytes # #processors: # - truncate_fields: diff --git a/libbeat/_meta/config/setup.dashboards.reference.yml.tmpl b/libbeat/_meta/config/setup.dashboards.reference.yml.tmpl index 1d3b0798007..d816843a925 100644 --- a/libbeat/_meta/config/setup.dashboards.reference.yml.tmpl +++ b/libbeat/_meta/config/setup.dashboards.reference.yml.tmpl @@ -2,14 +2,14 @@ # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag or the `setup` command. +# options here or by using the `-setup` CLI flag or the `setup` command. #setup.dashboards.enabled: false # The directory from where to read the dashboards. The default is the `kibana` # folder in the home path. #setup.dashboards.directory: ${path.home}/kibana -# The URL from where to download the dashboards archive. It is used instead of +# The URL from where to download the dashboard archive. It is used instead of # the directory if it has a value. #setup.dashboards.url: diff --git a/libbeat/_meta/config/setup.dashboards.yml.tmpl b/libbeat/_meta/config/setup.dashboards.yml.tmpl index 227b742a86e..35bd916b10e 100644 --- a/libbeat/_meta/config/setup.dashboards.yml.tmpl +++ b/libbeat/_meta/config/setup.dashboards.yml.tmpl @@ -4,8 +4,8 @@ # options here or by using the `setup` command. #setup.dashboards.enabled: false -# The URL from where to download the dashboards archive. By default this URL -# has a value which is computed based on the Beat name and version. For released +# The URL from where to download the dashboard archive. By default, this URL +# has a value that is computed based on the Beat name and version. For released # versions, this URL points to the dashboard archive on the artifacts.elastic.co # website. #setup.dashboards.url: diff --git a/libbeat/_meta/config/setup.ilm.reference.yml.tmpl b/libbeat/_meta/config/setup.ilm.reference.yml.tmpl index 406486c51b2..296bf0872ef 100644 --- a/libbeat/_meta/config/setup.ilm.reference.yml.tmpl +++ b/libbeat/_meta/config/setup.ilm.reference.yml.tmpl @@ -3,7 +3,7 @@ # Configure index lifecycle management (ILM) to manage the backing indices # of your data streams. -# Enable ILM support. Valid values are true, false. +# Enable ILM support. Valid values are true, or false. #setup.ilm.enabled: true # Set the lifecycle policy name. 
The default policy name is
diff --git a/libbeat/api/make_listener_posix.go b/libbeat/api/make_listener_posix.go
index f2a52da3dc7..5bfd17fa0b8 100644
--- a/libbeat/api/make_listener_posix.go
+++ b/libbeat/api/make_listener_posix.go
@@ -20,12 +20,11 @@ package api

 import (
+	"errors"
 	"fmt"
 	"net"
 	"os"

-	"github.com/pkg/errors"
-
 	"github.com/elastic/beats/v7/libbeat/api/npipe"
 )
@@ -53,10 +52,9 @@ func makeListener(cfg Config) (net.Listener, error) {
 	if network == "unix" {
 		if _, err := os.Stat(path); !os.IsNotExist(err) {
 			if err := os.Remove(path); err != nil {
-				return nil, errors.Wrapf(
-					err,
-					"cannot remove existing unix socket file at location %s",
-					path,
+				return nil, fmt.Errorf(
+					"cannot remove existing unix socket file at location %s: %w",
+					path, err,
 				)
 			}
 		}
@@ -70,11 +68,11 @@ func makeListener(cfg Config) (net.Listener, error) {
 	// Ensure file mode
 	if network == "unix" {
 		if err := os.Chmod(path, socketFileMode); err != nil {
-			return nil, errors.Wrapf(
-				err,
-				"could not set mode %d for unix socket file at location %s",
+			return nil, fmt.Errorf(
+				"could not set mode %d for unix socket file at location %s: %w",
 				socketFileMode, path,
+				err,
 			)
 		}
 	}
diff --git a/libbeat/api/make_listener_windows.go b/libbeat/api/make_listener_windows.go
index 4f72fcfec2d..3fb5a301da0 100644
--- a/libbeat/api/make_listener_windows.go
+++ b/libbeat/api/make_listener_windows.go
@@ -20,11 +20,10 @@ package api

 import (
+	"errors"
 	"fmt"
 	"net"

-	"github.com/pkg/errors"
-
 	"github.com/elastic/beats/v7/libbeat/api/npipe"
 )
@@ -40,7 +39,7 @@ func makeListener(cfg Config) (net.Listener, error) {
 		if len(cfg.SecurityDescriptor) == 0 {
 			sd, err = npipe.DefaultSD(cfg.User)
 			if err != nil {
-				return nil, errors.Wrap(err, "cannot generate security descriptor for the named pipe")
+				return nil, fmt.Errorf("cannot generate security descriptor for the named pipe: %w", err)
 			}
 		} else {
 			sd = cfg.SecurityDescriptor
diff --git a/libbeat/beat/event.go b/libbeat/beat/event.go
index 28549001c56..54fc3e27cc3 100644
--- a/libbeat/beat/event.go
+++ b/libbeat/beat/event.go
@@ -110,45 +110,62 @@ func (e *Event) deepUpdate(d mapstr.M, overwrite bool) {
 	if len(d) == 0 {
 		return
 	}
-	fieldsUpdate := d.Clone() // so we can delete redundant keys
-	var metaUpdate mapstr.M
+	// It's supported to update the timestamp using this function.
+	// However, we must handle it separately since it's a separate field of the event.
+	timestampValue, timestampExists := d[timestampFieldKey]
+	if timestampExists {
+		if overwrite {
+			_ = e.setTimestamp(timestampValue)
+		}

-	for fieldKey, value := range d {
-		switch fieldKey {
+		// Temporarily delete it from the update map,
+		// so we can do `e.Fields.DeepUpdate(d)` or
+		// `e.Fields.DeepUpdateNoOverwrite(d)` later.
+		delete(d, timestampFieldKey)
+	}

-		// one of the updates is the timestamp which is not a part of the event fields
-		case timestampFieldKey:
-			if overwrite {
-				_ = e.setTimestamp(value)
-			}
-			delete(fieldsUpdate, fieldKey)
+	// It's supported to update the metadata using this function.
+	// However, we must handle it separately since it's a separate field of the event.
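+	// For example (illustrative values), both of the following update shapes
+	// reach this branch and are normalized to mapstr.M by the switch below:
+	//
+	//	e.deepUpdate(mapstr.M{"@metadata": mapstr.M{"_id": "1"}}, true)
+	//	e.deepUpdate(mapstr.M{"@metadata": map[string]interface{}{"_id": "1"}}, true)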
+	metaValue, metaExists := d[metadataFieldKey]
+	if metaExists {
+		var metaUpdate mapstr.M

-		// some updates are addressed for the metadata not the fields
-		case metadataFieldKey:
-			switch meta := value.(type) {
-			case mapstr.M:
-				metaUpdate = meta
-			case map[string]interface{}:
-				metaUpdate = mapstr.M(meta)
-			}
+		switch meta := metaValue.(type) {
+		case mapstr.M:
+			metaUpdate = meta
+		case map[string]interface{}:
+			metaUpdate = mapstr.M(meta)
+		}

-			delete(fieldsUpdate, fieldKey)
+		if metaUpdate != nil {
+			if e.Meta == nil {
+				e.Meta = mapstr.M{}
+			}
+			if overwrite {
+				e.Meta.DeepUpdate(metaUpdate)
+			} else {
+				e.Meta.DeepUpdateNoOverwrite(metaUpdate)
+			}
 		}
+
+		// Temporarily delete it from the update map,
+		// so we can do `e.Fields.DeepUpdate(d)` or
+		// `e.Fields.DeepUpdateNoOverwrite(d)` later.
+		delete(d, metadataFieldKey)
 	}

-	if metaUpdate != nil {
-		if e.Meta == nil {
-			e.Meta = mapstr.M{}
+	// At the end we revert all changes we made to the update map.
+	defer func() {
+		if timestampExists {
+			d[timestampFieldKey] = timestampValue
 		}
-		if overwrite {
-			e.Meta.DeepUpdate(metaUpdate)
-		} else {
-			e.Meta.DeepUpdateNoOverwrite(metaUpdate)
+		if metaExists {
+			d[metadataFieldKey] = metaValue
 		}
-	}
+	}()

-	if len(fieldsUpdate) == 0 {
+	if len(d) == 0 {
 		return
 	}
@@ -157,9 +174,9 @@ func (e *Event) deepUpdate(d mapstr.M, overwrite bool) {
 	}

 	if overwrite {
-		e.Fields.DeepUpdate(fieldsUpdate)
+		e.Fields.DeepUpdate(d)
 	} else {
-		e.Fields.DeepUpdateNoOverwrite(fieldsUpdate)
+		e.Fields.DeepUpdateNoOverwrite(d)
 	}
 }
@@ -228,9 +245,18 @@ func metadataKey(key string) (string, bool) {
 	return "", false
 }

-// SetErrorWithOption sets jsonErr value in the event fields according to addErrKey value.
-func (e *Event) SetErrorWithOption(jsonErr mapstr.M, addErrKey bool) {
+// SetErrorWithOption sets the event error field with the given message when addErrKey is set to true.
+// If you want to include the data and field, pass them as parameters; they will be appended to the
+// error as fields with the corresponding names.
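+//
+// For example (illustrative values):
+//
+//	e.SetErrorWithOption("parsing input as JSON: unexpected EOF", true, `{"k":`, "message")
+//
+// sets e.Fields["error"] to {"message": ..., "type": "json", "data": `{"k":`, "field": "message"}.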
+func (e *Event) SetErrorWithOption(message string, addErrKey bool, data string, field string) {
 	if addErrKey {
-		e.Fields["error"] = jsonErr
+		errorField := mapstr.M{"message": message, "type": "json"}
+		if data != "" {
+			errorField["data"] = data
+		}
+		if field != "" {
+			errorField["field"] = field
+		}
+		e.Fields["error"] = errorField
 	}
 }
diff --git a/libbeat/beat/event_test.go b/libbeat/beat/event_test.go
index 5575f495106..cd165a3c459 100644
--- a/libbeat/beat/event_test.go
+++ b/libbeat/beat/event_test.go
@@ -18,6 +18,7 @@ package beat

 import (
+	"crypto/rand"
 	"testing"
 	"time"

@@ -26,11 +27,59 @@ import (
 	"github.com/elastic/elastic-agent-libs/mapstr"
 )

+const (
+	propSize = 1024 * 1024 * 10 // 10 MiB
+)
+
+var largeProp string
+
+func init() {
+	b := make([]byte, propSize)
+	_, _ = rand.Read(b)
+	largeProp = string(b)
+}
+
 func newEmptyEvent() *Event {
 	return &Event{Fields: mapstr.M{}}
 }

-func TestEventPutGetTimestamp(t *testing.T) {
+func newEvent(e mapstr.M) *Event {
+	n := &mapstr.M{
+		"Fields": mapstr.M{
+			"large_prop": largeProp,
+		},
+	}
+	n.DeepUpdate(e)
+	var ts time.Time
+	var meta mapstr.M
+	var fields mapstr.M
+	var private mapstr.M
+
+	v, ex := (*n)["Timestamp"]
+	if ex {
+		ts = v.(time.Time)
+	}
+	v, ex = (*n)["Meta"]
+	if ex {
+		meta = v.(mapstr.M)
+	}
+	v, ex = (*n)["Fields"]
+	if ex {
+		fields = v.(mapstr.M)
+	}
+	v, ex = (*n)["Private"]
+	if ex {
+		private = v.(mapstr.M)
+	}
+	return &Event{
+		Timestamp: ts,
+		Meta:      meta,
+		Fields:    fields,
+		Private:   private,
+	}
+}
+
+func BenchmarkTestEventPutGetTimestamp(b *testing.B) {
 	evt := newEmptyEvent()
 	ts := time.Now()

@@ -38,17 +87,17 @@
 	v, err := evt.GetValue("@timestamp")
 	if err != nil {
-		t.Fatal(err)
+		b.Fatal(err)
 	}

-	assert.Equal(t, ts, v)
-	assert.Equal(t, ts, evt.Timestamp)
+	assert.Equal(b, ts, v)
+	assert.Equal(b, ts, evt.Timestamp)

 	// The @timestamp is not written into Fields.
- assert.Nil(t, evt.Fields["@timestamp"]) + assert.Nil(b, evt.Fields["@timestamp"]) } -func TestDeepUpdate(t *testing.T) { +func BenchmarkTestDeepUpdate(b *testing.B) { ts := time.Now() cases := []struct { @@ -60,37 +109,43 @@ func TestDeepUpdate(t *testing.T) { }{ { name: "does nothing if no update", - event: &Event{}, + event: newEvent(mapstr.M{}), update: mapstr.M{}, - expected: &Event{}, + expected: newEvent(mapstr.M{}), }, { name: "updates timestamp", - event: &Event{}, + event: newEvent(mapstr.M{}), update: mapstr.M{ timestampFieldKey: ts, }, overwrite: true, expected: &Event{ Timestamp: ts, + Fields: mapstr.M{ + "large_prop": largeProp, + }, }, }, { name: "does not overwrite timestamp", - event: &Event{ - Timestamp: ts, - }, + event: newEvent(mapstr.M{ + "Timestamp": ts, + }), update: mapstr.M{ timestampFieldKey: time.Now().Add(time.Hour), }, overwrite: false, expected: &Event{ Timestamp: ts, + Fields: mapstr.M{ + "large_prop": largeProp, + }, }, }, { name: "initializes metadata if nil", - event: &Event{}, + event: newEvent(mapstr.M{}), update: mapstr.M{ metadataFieldKey: mapstr.M{ "first": "new", @@ -102,15 +157,18 @@ func TestDeepUpdate(t *testing.T) { "first": "new", "second": 42, }, + Fields: mapstr.M{ + "large_prop": largeProp, + }, }, }, { name: "updates metadata but does not overwrite", - event: &Event{ - Meta: mapstr.M{ + event: newEvent(mapstr.M{ + "Meta": mapstr.M{ "first": "initial", }, - }, + }), update: mapstr.M{ metadataFieldKey: mapstr.M{ "first": "new", @@ -123,15 +181,18 @@ func TestDeepUpdate(t *testing.T) { "first": "initial", "second": 42, }, + Fields: mapstr.M{ + "large_prop": largeProp, + }, }, }, { name: "updates metadata and overwrites", - event: &Event{ - Meta: mapstr.M{ + event: newEvent(mapstr.M{ + "Meta": mapstr.M{ "first": "initial", }, - }, + }), update: mapstr.M{ metadataFieldKey: mapstr.M{ "first": "new", @@ -144,15 +205,18 @@ func TestDeepUpdate(t *testing.T) { "first": "new", "second": 42, }, + Fields: mapstr.M{ + "large_prop": largeProp, + }, }, }, { name: "updates fields but does not overwrite", - event: &Event{ - Fields: mapstr.M{ + event: newEvent(mapstr.M{ + "Fields": mapstr.M{ "first": "initial", }, - }, + }), update: mapstr.M{ "first": "new", "second": 42, @@ -160,18 +224,19 @@ func TestDeepUpdate(t *testing.T) { overwrite: false, expected: &Event{ Fields: mapstr.M{ - "first": "initial", - "second": 42, + "first": "initial", + "second": 42, + "large_prop": largeProp, }, }, }, { name: "updates metadata and overwrites", - event: &Event{ - Fields: mapstr.M{ + event: newEvent(mapstr.M{ + "Fields": mapstr.M{ "first": "initial", }, - }, + }), update: mapstr.M{ "first": "new", "second": 42, @@ -179,123 +244,125 @@ func TestDeepUpdate(t *testing.T) { overwrite: true, expected: &Event{ Fields: mapstr.M{ - "first": "new", - "second": 42, + "first": "new", + "second": 42, + "large_prop": largeProp, }, }, }, { name: "initializes fields if nil", - event: &Event{}, + event: newEvent(mapstr.M{}), update: mapstr.M{ "first": "new", "second": 42, }, expected: &Event{ Fields: mapstr.M{ - "first": "new", - "second": 42, + "first": "new", + "second": 42, + "large_prop": largeProp, }, }, }, } for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { + b.Run(tc.name, func(b *testing.B) { tc.event.deepUpdate(tc.update, tc.overwrite) - assert.Equal(t, tc.expected.Timestamp, tc.event.Timestamp) - assert.Equal(t, tc.expected.Fields, tc.event.Fields) - assert.Equal(t, tc.expected.Meta, tc.event.Meta) + assert.Equal(b, tc.expected.Timestamp, tc.event.Timestamp) + 
assert.Equal(b, tc.expected.Fields, tc.event.Fields) + assert.Equal(b, tc.expected.Meta, tc.event.Meta) }) } } -func TestEventMetadata(t *testing.T) { +func BenchmarkTestEventMetadata(b *testing.B) { const id = "123" newMeta := func() mapstr.M { return mapstr.M{"_id": id} } - t.Run("put", func(t *testing.T) { + b.Run("put", func(b *testing.B) { evt := newEmptyEvent() meta := newMeta() evt.PutValue("@metadata", meta) - assert.Equal(t, meta, evt.Meta) - assert.Empty(t, evt.Fields) + assert.Equal(b, meta, evt.Meta) + assert.Empty(b, evt.Fields) }) - t.Run("get", func(t *testing.T) { + b.Run("get", func(b *testing.B) { evt := newEmptyEvent() evt.Meta = newMeta() meta, err := evt.GetValue("@metadata") - assert.NoError(t, err) - assert.Equal(t, evt.Meta, meta) + assert.NoError(b, err) + assert.Equal(b, evt.Meta, meta) }) - t.Run("put sub-key", func(t *testing.T) { + b.Run("put sub-key", func(b *testing.B) { evt := newEmptyEvent() evt.PutValue("@metadata._id", id) - assert.Equal(t, newMeta(), evt.Meta) - assert.Empty(t, evt.Fields) + assert.Equal(b, newMeta(), evt.Meta) + assert.Empty(b, evt.Fields) }) - t.Run("get sub-key", func(t *testing.T) { + b.Run("get sub-key", func(b *testing.B) { evt := newEmptyEvent() evt.Meta = newMeta() v, err := evt.GetValue("@metadata._id") - assert.NoError(t, err) - assert.Equal(t, id, v) + assert.NoError(b, err) + assert.Equal(b, id, v) }) - t.Run("delete", func(t *testing.T) { + b.Run("delete", func(b *testing.B) { evt := newEmptyEvent() evt.Meta = newMeta() err := evt.Delete("@metadata") - assert.NoError(t, err) - assert.Nil(t, evt.Meta) + assert.NoError(b, err) + assert.Nil(b, evt.Meta) }) - t.Run("delete sub-key", func(t *testing.T) { + b.Run("delete sub-key", func(b *testing.B) { evt := newEmptyEvent() evt.Meta = newMeta() err := evt.Delete("@metadata._id") - assert.NoError(t, err) - assert.Empty(t, evt.Meta) + assert.NoError(b, err) + assert.Empty(b, evt.Meta) }) - t.Run("setID", func(t *testing.T) { + b.Run("setID", func(b *testing.B) { evt := newEmptyEvent() evt.SetID(id) - assert.Equal(t, newMeta(), evt.Meta) + assert.Equal(b, newMeta(), evt.Meta) }) - t.Run("put non-metadata", func(t *testing.T) { + b.Run("put non-metadata", func(b *testing.B) { evt := newEmptyEvent() evt.PutValue("@metadataSpecial", id) - assert.Equal(t, mapstr.M{"@metadataSpecial": id}, evt.Fields) + assert.Equal(b, mapstr.M{"@metadataSpecial": id}, evt.Fields) }) - t.Run("delete non-metadata", func(t *testing.T) { + b.Run("delete non-metadata", func(b *testing.B) { evt := newEmptyEvent() evt.Meta = newMeta() err := evt.Delete("@metadataSpecial") - assert.Error(t, err) - assert.Equal(t, newMeta(), evt.Meta) + assert.Error(b, err) + assert.Equal(b, newMeta(), evt.Meta) }) } diff --git a/libbeat/common/jsontransform/expand.go b/libbeat/common/jsontransform/expand.go index c026343a3b0..be07c420074 100644 --- a/libbeat/common/jsontransform/expand.go +++ b/libbeat/common/jsontransform/expand.go @@ -34,7 +34,7 @@ import ( func ExpandFields(logger *logp.Logger, event *beat.Event, m mapstr.M, addErrorKey bool) { if err := expandFields(m); err != nil { logger.Errorf("JSON: failed to expand fields: %s", err) - event.SetErrorWithOption(createJSONError(err.Error()), addErrorKey) + event.SetErrorWithOption(err.Error(), addErrorKey, "", "") } } diff --git a/libbeat/common/jsontransform/jsonhelper.go b/libbeat/common/jsontransform/jsonhelper.go index 87c3c1a1eb2..15d70ae929a 100644 --- a/libbeat/common/jsontransform/jsonhelper.go +++ b/libbeat/common/jsontransform/jsonhelper.go @@ -23,7 +23,6 @@ 
import (
 	"time"

 	"github.com/elastic/beats/v7/libbeat/beat"
-	"github.com/elastic/elastic-agent-libs/logp"
 	"github.com/elastic/elastic-agent-libs/mapstr"
 )
@@ -39,11 +38,9 @@ var (

 // WriteJSONKeys writes the json keys to the given event based on the overwriteKeys option and the addErrKey
 func WriteJSONKeys(event *beat.Event, keys map[string]interface{}, expandKeys, overwriteKeys, addErrKey bool) {
-	logger := logp.NewLogger("jsonhelper")
 	if expandKeys {
 		if err := expandFields(keys); err != nil {
-			logger.Errorf("JSON: failed to expand fields: %s", err)
-			event.SetErrorWithOption(createJSONError(err.Error()), addErrKey)
+			event.SetErrorWithOption(err.Error(), addErrKey, "", "")
 			return
 		}
 	}
@@ -62,16 +59,14 @@ func WriteJSONKeys(event *beat.Event, keys map[string]interface{}, expandKeys, o
 		case "@timestamp":
 			vstr, ok := v.(string)
 			if !ok {
-				logger.Error("JSON: Won't overwrite @timestamp because value is not string")
-				event.SetErrorWithOption(createJSONError("@timestamp not overwritten (not string)"), addErrKey)
+				event.SetErrorWithOption("@timestamp not overwritten (not string)", addErrKey, "", "")
 				continue
 			}

 			// @timestamp must be of format RFC3339 or ISO8601
 			ts, err := parseTimestamp(vstr)
 			if err != nil {
-				logger.Errorf("JSON: Won't overwrite @timestamp because of parsing error: %v", err)
-				event.SetErrorWithOption(createJSONError(fmt.Sprintf("@timestamp not overwritten (parse error on %s)", vstr)), addErrKey)
+				event.SetErrorWithOption(fmt.Sprintf("@timestamp not overwritten (parse error on %s)", vstr), addErrKey, "", "")
 				continue
 			}
 			event.Timestamp = ts
@@ -93,19 +88,17 @@ func WriteJSONKeys(event *beat.Event, keys map[string]interface{}, expandKeys, o
 				event.Meta.DeepUpdate(mapstr.M(m))
 			default:
-				event.SetErrorWithOption(createJSONError("failed to update @metadata"), addErrKey)
+				event.SetErrorWithOption("failed to update @metadata", addErrKey, "", "")
 			}
 		case "type":
 			vstr, ok := v.(string)
 			if !ok {
-				logger.Error("JSON: Won't overwrite type because value is not string")
-				event.SetErrorWithOption(createJSONError("type not overwritten (not string)"), addErrKey)
+				event.SetErrorWithOption("type not overwritten (not string)", addErrKey, "", "")
 				continue
 			}
 			if len(vstr) == 0 || vstr[0] == '_' {
-				logger.Error("JSON: Won't overwrite type because value is empty or starts with an underscore")
-				event.SetErrorWithOption(createJSONError(fmt.Sprintf("type not overwritten (invalid value [%s])", vstr)), addErrKey)
+				event.SetErrorWithOption(fmt.Sprintf("type not overwritten (invalid value [%s])", vstr), addErrKey, "", "")
 				continue
 			}
 			event.Fields[k] = vstr
@@ -118,10 +111,6 @@ func WriteJSONKeys(event *beat.Event, keys map[string]interface{}, expandKeys, o
 	event.Fields.DeepUpdate(keys)
 }

-func createJSONError(message string) mapstr.M {
-	return mapstr.M{"message": message, "type": "json"}
-}
-
 func removeKeys(keys map[string]interface{}, names ...string) {
 	for _, name := range names {
 		delete(keys, name)
diff --git a/libbeat/common/jsontransform/jsonhelper_test.go b/libbeat/common/jsontransform/jsonhelper_test.go
index bed1de4c0fd..c8791b3c610 100644
--- a/libbeat/common/jsontransform/jsonhelper_test.go
+++ b/libbeat/common/jsontransform/jsonhelper_test.go
@@ -53,6 +53,7 @@ func TestWriteJSONKeys(t *testing.T) {
 		expectedMetadata  mapstr.M
 		expectedTimestamp time.Time
 		expectedFields    mapstr.M
+		addErrorKeys      bool
 	}{
 		"overwrite_true": {
			overwriteKeys: true,
@@ -192,6 +193,32 @@
 			},
 		},
+		// This test case makes sure that when an error is found in the event, the proper
fields are defined and measured + "error_case": { + expandKeys: false, + overwriteKeys: true, + keys: map[string]interface{}{ + "top_b": map[string]interface{}{ + "inner_d.inner_e": "COMPLETELY_NEW_e", + }, + "@timestamp": map[string]interface{}{"when": "now", "another": "yesterday"}, + }, + expectedMetadata: eventMetadata.Clone(), + expectedTimestamp: eventTimestamp, + expectedFields: mapstr.M{ + "error": mapstr.M{ + "message": "@timestamp not overwritten (not string)", + "type": "json", + }, + "top_a": 23, + "top_b": mapstr.M{ + "inner_c": "see", + "inner_d": "dee", + "inner_d.inner_e": "COMPLETELY_NEW_e", + }, + }, + addErrorKeys: true, + }, } for name, test := range tests { @@ -202,10 +229,207 @@ func TestWriteJSONKeys(t *testing.T) { Fields: eventFields.Clone(), } - WriteJSONKeys(event, test.keys, test.expandKeys, test.overwriteKeys, false) + WriteJSONKeys(event, test.keys, test.expandKeys, test.overwriteKeys, test.addErrorKeys) require.Equal(t, test.expectedMetadata, event.Meta) require.Equal(t, test.expectedTimestamp.UnixNano(), event.Timestamp.UnixNano()) require.Equal(t, test.expectedFields, event.Fields) }) } } + +func BenchmarkWriteJSONKeys(b *testing.B) { + now := time.Now() + now = now.Round(time.Second) + + eventTimestamp := time.Date(2020, 01, 01, 01, 01, 00, 0, time.UTC) + eventMetadata := mapstr.M{ + "foo": "bar", + "baz": mapstr.M{ + "qux": 17, + }, + } + eventFields := mapstr.M{ + "top_a": 23, + "top_b": mapstr.M{ + "inner_c": "see", + "inner_d": "dee", + }, + } + + tests := map[string]struct { + keys map[string]interface{} + expandKeys bool + overwriteKeys bool + expectedFields mapstr.M + addErrorKeys bool + }{ + "overwrite_true": { + overwriteKeys: true, + keys: map[string]interface{}{ + "@metadata": map[string]interface{}{ + "foo": "NEW_bar", + "baz": map[string]interface{}{ + "qux": "NEW_qux", + "durrr": "COMPLETELY_NEW", + }, + }, + "@timestamp": now.Format(time.RFC3339), + "top_b": map[string]interface{}{ + "inner_d": "NEW_dee", + "inner_e": "COMPLETELY_NEW_e", + }, + "top_c": "COMPLETELY_NEW_c", + }, + expectedFields: mapstr.M{ + "top_a": 23, + "top_b": mapstr.M{ + "inner_c": "see", + "inner_d": "NEW_dee", + "inner_e": "COMPLETELY_NEW_e", + }, + "top_c": "COMPLETELY_NEW_c", + }, + }, + "overwrite_true_ISO8601": { + overwriteKeys: true, + keys: map[string]interface{}{ + "@metadata": map[string]interface{}{ + "foo": "NEW_bar", + "baz": map[string]interface{}{ + "qux": "NEW_qux", + "durrr": "COMPLETELY_NEW", + }, + }, + "@timestamp": now.Format(iso8601), + "top_b": map[string]interface{}{ + "inner_d": "NEW_dee", + "inner_e": "COMPLETELY_NEW_e", + }, + "top_c": "COMPLETELY_NEW_c", + }, + expectedFields: mapstr.M{ + "top_a": 23, + "top_b": mapstr.M{ + "inner_c": "see", + "inner_d": "NEW_dee", + "inner_e": "COMPLETELY_NEW_e", + }, + "top_c": "COMPLETELY_NEW_c", + }, + }, + "overwrite_false": { + overwriteKeys: false, + keys: map[string]interface{}{ + "@metadata": map[string]interface{}{ + "foo": "NEW_bar", + "baz": map[string]interface{}{ + "qux": "NEW_qux", + "durrr": "COMPLETELY_NEW", + }, + }, + "@timestamp": now.Format(time.RFC3339), + "top_b": map[string]interface{}{ + "inner_d": "NEW_dee", + "inner_e": "COMPLETELY_NEW_e", + }, + "top_c": "COMPLETELY_NEW_c", + }, + expectedFields: mapstr.M{ + "top_a": 23, + "top_b": mapstr.M{ + "inner_c": "see", + "inner_d": "dee", + "inner_e": "COMPLETELY_NEW_e", + }, + "top_c": "COMPLETELY_NEW_c", + }, + }, + "expand_true": { + expandKeys: true, + overwriteKeys: true, + keys: map[string]interface{}{ + "top_b": 
map[string]interface{}{ + "inner_d.inner_e": "COMPLETELY_NEW_e", + }, + }, + expectedFields: mapstr.M{ + "top_a": 23, + "top_b": mapstr.M{ + "inner_c": "see", + "inner_d": mapstr.M{ + "inner_e": "COMPLETELY_NEW_e", + }, + }, + }, + }, + "expand_false": { + expandKeys: false, + overwriteKeys: true, + keys: map[string]interface{}{ + "top_b": map[string]interface{}{ + "inner_d.inner_e": "COMPLETELY_NEW_e", + }, + }, + expectedFields: mapstr.M{ + "top_a": 23, + "top_b": mapstr.M{ + "inner_c": "see", + "inner_d": "dee", + "inner_d.inner_e": "COMPLETELY_NEW_e", + }, + }, + }, + // This benchmark makes sure that when an error is found in the event, the proper fields are defined and measured + "error_case": { + expandKeys: false, + overwriteKeys: true, + keys: map[string]interface{}{ + "top_b": map[string]interface{}{ + "inner_d.inner_e": "COMPLETELY_NEW_e", + }, + "@timestamp": "invalid string", + }, + expectedFields: mapstr.M{ + "error": mapstr.M{ + "message": "@timestamp not overwritten (parse error on invalid string)", + "type": "json", + }, + "top_a": 23, + "top_b": mapstr.M{ + "inner_c": "see", + "inner_d": "dee", + "inner_d.inner_e": "COMPLETELY_NEW_e", + }, + }, + addErrorKeys: true, + }, + } + + for name, test := range tests { + b.Run(name, func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + event := &beat.Event{ + Timestamp: eventTimestamp, + Meta: eventMetadata.Clone(), + Fields: eventFields.Clone(), + } + // The WriteJSONKeys will override the keys, so we need to clone it. + keys := clone(test.keys) + b.StartTimer() + WriteJSONKeys(event, keys, test.expandKeys, test.overwriteKeys, test.addErrorKeys) + require.Equal(b, test.expectedFields, event.Fields) + } + }) + } +} + +func clone(a map[string]interface{}) map[string]interface{} { + newMap := make(map[string]interface{}) + for k, v := range a { + newMap[k] = v + } + return newMap +} diff --git a/libbeat/common/streambuf/streambuf.go b/libbeat/common/streambuf/streambuf.go index f28c2157792..48bbabae0d5 100644 --- a/libbeat/common/streambuf/streambuf.go +++ b/libbeat/common/streambuf/streambuf.go @@ -155,7 +155,9 @@ func (b *Buffer) doAppend(data []byte, retainable bool, newCap int) error { b.data = tmp } } - b.data = append(b.data, data...) + tBuf := bytes.NewBuffer(b.data) + tBuf.Write(data) + b.data = tBuf.Bytes() } b.available += len(data) diff --git a/libbeat/docs/index.asciidoc b/libbeat/docs/index.asciidoc index f182d625022..10bf384c7d2 100644 --- a/libbeat/docs/index.asciidoc +++ b/libbeat/docs/index.asciidoc @@ -31,8 +31,8 @@ include::./config-file-format.asciidoc[] include::./upgrading.asciidoc[] -include::./release-notes/breaking/breaking.asciidoc[] - include::./release.asciidoc[] +include::./release-notes/breaking/breaking.asciidoc[] + include::{libbeat-dir}/contributing-to-beats.asciidoc[] diff --git a/libbeat/docs/metrics-in-logs.asciidoc b/libbeat/docs/metrics-in-logs.asciidoc new file mode 100644 index 00000000000..c499e7462f4 --- /dev/null +++ b/libbeat/docs/metrics-in-logs.asciidoc @@ -0,0 +1,242 @@ + + +Every 30 seconds (by default), {beatname_uc} collects a _snapshot_ of metrics about itself. From this snapshot, {beatname_uc} computes a _delta snapshot_; this delta snapshot contains any metrics that have _changed_ since the last snapshot. Note that the values of the metrics are the values when the snapshot is taken, _NOT_ the _difference_ in values from the last snapshot. 
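+To make the delta concrete, here is a minimal, illustrative Go sketch (not the
+actual libbeat implementation) of deriving a delta snapshot from two flat
+snapshots; only new or changed metrics are kept, at their current readings:
+
+[source,go]
+----
+// deltaSnapshot keeps the entries of cur that are new or whose values differ
+// from prev. The kept values are the current readings, not numeric differences.
+func deltaSnapshot(prev, cur map[string]int64) map[string]int64 {
+	delta := make(map[string]int64)
+	for k, v := range cur {
+		if old, ok := prev[k]; !ok || old != v {
+			delta[k] = v
+		}
+	}
+	return delta
+}
+----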
+
+If this delta snapshot contains _any_ metrics (indicating at least one metric that has changed since the last snapshot), this delta snapshot is serialized as JSON and emitted in {beatname_uc}'s logs at the `INFO` log level. Here is an example of such a log entry:
+
+[source,json]
+----
+{"log.level":"info","@timestamp":"2023-07-14T12:50:36.811Z","log.logger":"monitoring","log.origin":{"file.name":"log/log.go","file.line":187},"message":"Non-zero metrics in the last 30s","service.name":"filebeat","monitoring":{"metrics":{"beat":{"cgroup":{"memory":{"mem":{"usage":{"bytes":0}}}},"cpu":{"system":{"ticks":692690,"time":{"ms":60}},"total":{"ticks":3167250,"time":{"ms":150},"value":3167250},"user":{"ticks":2474560,"time":{"ms":90}}},"handles":{"limit":{"hard":1048576,"soft":1048576},"open":32},"info":{"ephemeral_id":"2bab8688-34c0-4522-80af-db86948d547d","uptime":{"ms":617670096},"version":"8.6.2"},"memstats":{"gc_next":57189272,"memory_alloc":43589824,"memory_total":275281335792,"rss":183574528},"runtime":{"goroutines":212}},"filebeat":{"events":{"active":5,"added":52,"done":49},"harvester":{"open_files":6,"running":6,"started":1}},"libbeat":{"config":{"module":{"running":15}},"output":{"events":{"acked":48,"active":0,"batches":6,"total":48},"read":{"bytes":210},"write":{"bytes":26923}},"pipeline":{"clients":15,"events":{"active":5,"filtered":1,"published":51,"total":52},"queue":{"acked":48}}},"registrar":{"states":{"current":14,"update":49},"writes":{"success":6,"total":6}},"system":{"load":{"1":0.91,"15":0.37,"5":0.4,"norm":{"1":0.1138,"15":0.0463,"5":0.05}}}},"ecs.version":"1.6.0"}}
+----
+
+[discrete]
+== Details
+
+Focusing on the `.monitoring.metrics` field and formatting the JSON, its value is:
+
+[source,json]
+----
+{
+  "beat": {
+    "cgroup": {
+      "memory": {
+        "mem": {
+          "usage": {
+            "bytes": 0
+          }
+        }
+      }
+    },
+    "cpu": {
+      "system": {
+        "ticks": 692690,
+        "time": {
+          "ms": 60
+        }
+      },
+      "total": {
+        "ticks": 3167250,
+        "time": {
+          "ms": 150
+        },
+        "value": 3167250
+      },
+      "user": {
+        "ticks": 2474560,
+        "time": {
+          "ms": 90
+        }
+      }
+    },
+    "handles": {
+      "limit": {
+        "hard": 1048576,
+        "soft": 1048576
+      },
+      "open": 32
+    },
+    "info": {
+      "ephemeral_id": "2bab8688-34c0-4522-80af-db86948d547d",
+      "uptime": {
+        "ms": 617670096
+      },
+      "version": "8.6.2"
+    },
+    "memstats": {
+      "gc_next": 57189272,
+      "memory_alloc": 43589824,
+      "memory_total": 275281335792,
+      "rss": 183574528
+    },
+    "runtime": {
+      "goroutines": 212
+    }
+  },
+  "filebeat": {
+    "events": {
+      "active": 5,
+      "added": 52,
+      "done": 49
+    },
+    "harvester": {
+      "open_files": 6,
+      "running": 6,
+      "started": 1
+    }
+  },
+  "libbeat": {
+    "config": {
+      "module": {
+        "running": 15
+      }
+    },
+    "output": {
+      "events": {
+        "acked": 48,
+        "active": 0,
+        "batches": 6,
+        "total": 48
+      },
+      "read": {
+        "bytes": 210
+      },
+      "write": {
+        "bytes": 26923
+      }
+    },
+    "pipeline": {
+      "clients": 15,
+      "events": {
+        "active": 5,
+        "filtered": 1,
+        "published": 51,
+        "total": 52
+      },
+      "queue": {
+        "acked": 48
+      }
+    }
+  },
+  "registrar": {
+    "states": {
+      "current": 14,
+      "update": 49
+    },
+    "writes": {
+      "success": 6,
+      "total": 6
+    }
+  },
+  "system": {
+    "load": {
+      "1": 0.91,
+      "5": 0.4,
+      "15": 0.37,
+      "norm": {
+        "1": 0.1138,
+        "5": 0.05,
+        "15": 0.0463
+      }
+    }
+  }
+}
+----
+
+The following tables explain the meaning of the most important fields under `.monitoring.metrics` and also provide hints that might be helpful in troubleshooting {beatname_uc} issues.
+ +[cols="1,1,2,2"] +|=== +| Field path (relative to `.monitoring.metrics`) | Type | Meaning | Troubleshooting hints + +| `.beat` | Object | Information that is common to all Beats, e.g. version, goroutines, file handles, CPU, memory | +| `.libbeat` | Object | Information about the publisher pipeline and output, also common to all Beats | +ifeval::["{beatname_lc}"=="filebeat"] +| `.filebeat` | Object | Information specific to {filebeat}, e.g. harvester, events | +endif::[] +|=== + +[cols="1,1,2,2"] +|=== +| Field path (relative to `.monitoring.metrics.beat`) | Type | Meaning | Troubleshooting hints + +| `.runtime.goroutines` | Integer | Number of goroutines running | If this number grows over time, it indicates a goroutine leak +|=== + +[cols="1,1,2,2"] +|=== +| Field path (relative to `.monitoring.metrics.libbeat`) | Type | Meaning | Troubleshooting hints + +| `.pipeline.events.active` | Integer | Number of events currently in the libbeat publisher pipeline. | If this number grows over time, it may indicate that {beatname_uc} is producing events faster than the output can consume them. Consider increasing the number of output workers (if this setting is supported by the output; {es} and {ls} outputs support this setting). The pipeline includes events currently being processed as well as events in the queue. So this metric can sometimes end up slightly higher than the queue size. If this metric reaches the maximum queue size (`queue.mem.events` for the in-memory queue), it almost certainly indicates backpressure on {beatname_uc}, implying that {beatname_uc} may need to temporarily stop ingesting more events from the source until this backpressure is relieved. +| `.output.events.total` | Integer | Number of events currently being processed by the output. | If this number grows over time, it may indicate that the output destination (e.g. {ls} pipeline or {es} cluster) is not able to accept events at the same or faster rate than what {beatname_uc} is sending to it. +| `.output.events.acked` | Integer | Number of events acknowledged by the output destination. | Generally, we want this number to be the same as `.output.events.total` as this indicates that the output destination has reliably received all the events sent to it. +| `.output.events.failed` | Integer | Number of events that {beatname_uc} tried to send to the output destination, but the destination failed to receive them. | Generally, we want this field to be absent or its value to be zero. When the value is greater than zero, it's useful to check {beatname_uc}'s logs right before this log entry's `@timestamp` to see if there are any connectivity issues with the output destination. Note that failed events are not lost or dropped; they will be sent back to the publisher pipeline for retrying later. +|=== + +ifeval::["{beatname_lc}"=="filebeat"] +[cols="1,1,2,2"] +|=== +| Field path (relative to `.monitoring.metrics.filebeat`) | Type | Meaning | Troubleshooting hints + +| `.events.active` | Integer | Number of events being actively processed by {filebeat} (including events {filebeat} has already sent to the libbeat publisher pipeline, but not including events the pipeline has sent to the output). | If this number grows over time, it may indicate that {filebeat} inputs are harvesting events too fast for the pipeline and output to keep up. 
+|=== +endif::[] + +[discrete] +== Useful commands + +[discrete] +=== Parse monitoring metrics from unstructured {beatname_uc} logs + +For {beatname_uc} versions that emit unstructured logs, the following script can be +used to parse monitoring metrics from such logs: https://github.com/elastic/beats/blob/main/script/metrics_from_log_file.sh. + + +ifeval::["{beatname_lc}"=="filebeat"] +[discrete] +=== Check if {filebeat} is processing events + +[source] +---- +$ cat beat.log | jq -r '[.["@timestamp"],.monitoring.metrics.filebeat.events.active,.monitoring.metrics.libbeat.pipeline.events.active,.monitoring.metrics.libbeat.output.events.total,.monitoring.metrics.libbeat.output.events.acked,.monitoring.metrics.libbeat.output.events.failed//0] | @tsv' | sort +---- + +Example output: + +[source] +---- +2023-07-14T11:24:36.811Z 1 1 38033 38033 0 +2023-07-14T11:25:06.811Z 1 1 17 17 0 +2023-07-14T11:25:36.812Z 1 1 16 16 0 +2023-07-14T11:26:06.811Z 1 1 17 17 0 +2023-07-14T11:26:36.811Z 2 2 21 21 0 +2023-07-14T11:27:06.812Z 1 1 18 18 0 +2023-07-14T11:27:36.811Z 1 1 17 17 0 +2023-07-14T11:28:06.811Z 1 1 18 18 0 +2023-07-14T11:28:36.811Z 1 1 16 16 0 +2023-07-14T11:37:06.811Z 1 1 270 270 0 +2023-07-14T11:37:36.811Z 1 1 16 16 0 +2023-07-14T11:38:06.811Z 1 1 17 17 0 +2023-07-14T11:38:36.811Z 1 1 16 16 0 +2023-07-14T11:41:36.811Z 3 3 323 323 0 +2023-07-14T11:42:06.811Z 3 3 17 17 0 +2023-07-14T11:42:36.812Z 4 4 18 18 0 +2023-07-14T11:43:06.811Z 4 4 17 17 0 +2023-07-14T11:43:36.811Z 2 2 17 17 0 +2023-07-14T11:47:06.811Z 0 0 117 117 0 +2023-07-14T11:47:36.811Z 2 2 14 14 0 +2023-07-14T11:48:06.811Z 3 3 17 17 0 +2023-07-14T11:48:36.811Z 2 2 17 17 0 +2023-07-14T12:49:36.811Z 3 3 2008 1960 48 +2023-07-14T12:50:06.812Z 2 2 18 18 0 +2023-07-14T12:50:36.811Z 5 5 48 48 0 +---- + +The columns here are: + +1. `.@timestamp` +2. `.monitoring.metrics.filebeat.events.active` +3. `.monitoring.metrics.libbeat.pipeline.events.active` +4. `.monitoring.metrics.libbeat.output.events.total` +5. `.monitoring.metrics.libbeat.output.events.acked` +6. `.monitoring.metrics.libbeat.output.events.failed` +endif::[] diff --git a/libbeat/docs/output-cloud.asciidoc b/libbeat/docs/output-cloud.asciidoc index 4ac2b98ef24..2502dfb1a3c 100644 --- a/libbeat/docs/output-cloud.asciidoc +++ b/libbeat/docs/output-cloud.asciidoc @@ -42,8 +42,6 @@ The Cloud ID, which can be found in the {ess} web console, is used by overwrites the `output.elasticsearch.hosts` and `setup.kibana.host` settings. For more on locating and configuring the Cloud ID, see {ece-ref}/ece-cloud-id.html[Configure Beats and Logstash with Cloud ID]. -NOTE: The base64 encoded `cloud.id` found in the {ess} web console does not explicitly specify a port. This means that {beatname_uc} will default to using port 443 when using `cloud.id`, not the commonly configured cloud endpoint port 9243. - ==== `cloud.auth` When specified, the `cloud.auth` overwrites the `output.elasticsearch.username` and diff --git a/libbeat/docs/release-notes/breaking/breaking-7.17.asciidoc b/libbeat/docs/release-notes/breaking/breaking-7.17.asciidoc index 20c0ed20905..d7d73508c6d 100644 --- a/libbeat/docs/release-notes/breaking/breaking-7.17.asciidoc +++ b/libbeat/docs/release-notes/breaking/breaking-7.17.asciidoc @@ -5,14 +5,7 @@ 7.17 ++++ -//NOTE: The notable-breaking-changes tagged regions are re-used in the -//Installation and Upgrade Guide - -// tag::notable-breaking-changes[] - The Docker base image has changed from CentOS 7 to Ubuntu 20.04. This change affects all {beats}. 
-// end::notable-breaking-changes[] - {see-relnotes} diff --git a/libbeat/docs/release-notes/breaking/breaking-8.0.asciidoc b/libbeat/docs/release-notes/breaking/breaking-8.0.asciidoc index 858e7c55fee..7fe56f44053 100644 --- a/libbeat/docs/release-notes/breaking/breaking-8.0.asciidoc +++ b/libbeat/docs/release-notes/breaking/breaking-8.0.asciidoc @@ -9,11 +9,6 @@ See the <> for a complete list of breaking changes, bug fixes, and enhancements, including changes to beta or experimental functionality. -//NOTE: The notable-breaking-changes tagged regions are re-used in the -//Installation and Upgrade Guide - -//tag::notable-breaking-changes[] - [discrete] ==== {beats} logs are now ECS-compliant @@ -92,7 +87,3 @@ However this caused problems for some users. Starting in version 8.0.0, filesets are disabled by default. You must explicitly enable the filesets you want {filebeat} to use. - -// end::notable-breaking-changes[] - - diff --git a/libbeat/docs/release-notes/breaking/breaking-8.1.asciidoc b/libbeat/docs/release-notes/breaking/breaking-8.1.asciidoc deleted file mode 100644 index 1286a36fb8d..00000000000 --- a/libbeat/docs/release-notes/breaking/breaking-8.1.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -[[breaking-changes-8.1]] - -=== Breaking changes in 8.1 -++++ -8.1 -++++ - -See the <> for a complete list of breaking changes, -bug fixes, and enhancements, including changes to beta or experimental -functionality. - -//NOTE: The notable-breaking-changes tagged regions are re-used in the -//Installation and Upgrade Guide - -//tag::notable-breaking-changes[] - -There are no important breaking changes in this release. - -// end::notable-breaking-changes[] diff --git a/libbeat/docs/release-notes/breaking/breaking-8.2.asciidoc b/libbeat/docs/release-notes/breaking/breaking-8.2.asciidoc deleted file mode 100644 index 26dd74239bd..00000000000 --- a/libbeat/docs/release-notes/breaking/breaking-8.2.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -[[breaking-changes-8.2]] - -=== Breaking changes in 8.2 -++++ -8.2 -++++ - -See the <> for a complete list of breaking changes, -bug fixes, and enhancements, including changes to beta or experimental -functionality. - -//NOTE: The notable-breaking-changes tagged regions are re-used in the -//Installation and Upgrade Guide - -//tag::notable-breaking-changes[] - -There are no breaking changes in this release. - -// end::notable-breaking-changes[] diff --git a/libbeat/docs/release-notes/breaking/breaking-8.3.asciidoc b/libbeat/docs/release-notes/breaking/breaking-8.3.asciidoc deleted file mode 100644 index 2922f1e3d9b..00000000000 --- a/libbeat/docs/release-notes/breaking/breaking-8.3.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -[[breaking-changes-8.3]] - -=== Breaking changes in 8.3 -++++ -8.3 -++++ - -See the <> for a complete list of breaking changes, -bug fixes, and enhancements, including changes to beta or experimental -functionality. - -//NOTE: The notable-breaking-changes tagged regions are re-used in the -//Installation and Upgrade Guide - -//tag::notable-breaking-changes[] - -There are no breaking changes in this release. 
- -// end::notable-breaking-changes[] diff --git a/libbeat/docs/release-notes/breaking/breaking-8.4.asciidoc b/libbeat/docs/release-notes/breaking/breaking-8.4.asciidoc deleted file mode 100644 index 7a39b2e2cd5..00000000000 --- a/libbeat/docs/release-notes/breaking/breaking-8.4.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -[[breaking-changes-8.4]] - -=== Breaking changes in 8.4 -++++ -8.4 -++++ - -See the <> for a complete list of breaking changes, -bug fixes, and enhancements, including changes to beta or experimental -functionality. - -//NOTE: The notable-breaking-changes tagged regions are re-used in the -//Installation and Upgrade Guide - -//tag::notable-breaking-changes[] - -There are no notable breaking changes in this release. - -// end::notable-breaking-changes[] diff --git a/libbeat/docs/release-notes/breaking/breaking-8.5.asciidoc b/libbeat/docs/release-notes/breaking/breaking-8.5.asciidoc deleted file mode 100644 index 35141ecfb9c..00000000000 --- a/libbeat/docs/release-notes/breaking/breaking-8.5.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -[[breaking-changes-8.5]] - -=== Breaking changes in 8.5 -++++ -8.5 -++++ - -See the <> for a complete list of breaking changes, -bug fixes, and enhancements, including changes to beta or experimental -functionality. - -//NOTE: The notable-breaking-changes tagged regions are re-used in the -//Installation and Upgrade Guide - -//tag::notable-breaking-changes[] - -There are no notable breaking changes in this release. - -// end::notable-breaking-changes[] diff --git a/libbeat/docs/release-notes/breaking/breaking.asciidoc b/libbeat/docs/release-notes/breaking/breaking.asciidoc index bdf4bd8cef0..b66dc6008c7 100644 --- a/libbeat/docs/release-notes/breaking/breaking.asciidoc +++ b/libbeat/docs/release-notes/breaking/breaking.asciidoc @@ -9,23 +9,10 @@ changes, but there are breaking changes between major versions (e.g. 7.x to 8.y). Migrating directly between non consecutive major versions (e.g. 6.x to 8.x) is not recommended. -See the following topics for a description of breaking changes: +See the following topics for a description of breaking changes between major versions: -* <> -* <> -* <> -* <> -* <> * <> -include::breaking-8.5.asciidoc[] - -include::breaking-8.4.asciidoc[] - -include::breaking-8.3.asciidoc[] - -include::breaking-8.2.asciidoc[] - -include::breaking-8.1.asciidoc[] +For breaking changes between minor versions, see the <>. include::breaking-8.0.asciidoc[] diff --git a/libbeat/docs/release-notes/redirects.asciidoc b/libbeat/docs/release-notes/redirects.asciidoc index fd5c8316c64..cd6585651ce 100644 --- a/libbeat/docs/release-notes/redirects.asciidoc +++ b/libbeat/docs/release-notes/redirects.asciidoc @@ -3,3 +3,33 @@ This page has moved. Please see {observability-guide}/whats-new.html[What's new in Observability {minor-version}]. + +[role="exclude",id="breaking-changes-8.5"] +== Breaking changes in 8.5 + +This page no longer exists. +There are no notable breaking changes for 8.5. + +[role="exclude",id="breaking-changes-8.4"] +== Breaking changes in 8.4 + +This page no longer exists. +There are no notable breaking changes for 8.4. + +[role="exclude",id="breaking-changes-8.3"] +== Breaking changes in 8.3 + +This page no longer exists. +There are no notable breaking changes for 8.3. + +[role="exclude",id="breaking-changes-8.2"] +== Breaking changes in 8.2 + +This page no longer exists. +There are no notable breaking changes for 8.2. + +[role="exclude",id="breaking-changes-8.1"] +== Breaking changes in 8.1 + +This page no longer exists. 
+There are no notable breaking changes for 8.1. diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index f242bd0fb83..28a6af8b08d 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -8,6 +8,8 @@ This section summarizes the changes in each release. Also read <> for more detail about changes that affect upgrade. +* <> +* <> * <> * <> * <> diff --git a/libbeat/docs/version.asciidoc b/libbeat/docs/version.asciidoc index a73f993a283..065007266fd 100644 --- a/libbeat/docs/version.asciidoc +++ b/libbeat/docs/version.asciidoc @@ -1,6 +1,6 @@ -:stack-version: 8.9.0 -:doc-branch: master -:go-version: 1.19.10 +:stack-version: 8.10.0 +:doc-branch: main +:go-version: 1.19.12 :release-state: unreleased :python: 3.7 :docker: 1.12 diff --git a/libbeat/magefile.go b/libbeat/magefile.go index 2c3ae40f7b8..5c4b7ada17e 100644 --- a/libbeat/magefile.go +++ b/libbeat/magefile.go @@ -75,7 +75,7 @@ func IntegTest() { // GoIntegTest starts the docker containers and executes the Go integration tests. func GoIntegTest(ctx context.Context) error { - mg.Deps(Fields) + mg.Deps(Fields, devtools.BuildSystemTestBinary) args := devtools.DefaultGoTestIntegrationFromHostArgs() // ES_USER must be admin in order for the Go Integration tests to function because they require // indices:data/read/search diff --git a/libbeat/monitoring/inputmon/httphandler.go b/libbeat/monitoring/inputmon/httphandler.go index 46368ee5f79..d4a8d9e4499 100644 --- a/libbeat/monitoring/inputmon/httphandler.go +++ b/libbeat/monitoring/inputmon/httphandler.go @@ -64,7 +64,14 @@ func (h *handler) allInputs(w http.ResponseWriter, req *http.Request) { return } - metrics := monitoring.CollectStructSnapshot(h.registry, monitoring.Full, false) + filtered := filteredSnapshot(h.registry, requestedType) + + w.Header().Set(contentType, applicationJSON) + serveJSON(w, filtered, requestedPretty) +} + +func filteredSnapshot(r *monitoring.Registry, requestedType string) []map[string]any { + metrics := monitoring.CollectStructSnapshot(r, monitoring.Full, false) filtered := make([]map[string]any, 0, len(metrics)) for _, ifc := range metrics { @@ -84,9 +91,7 @@ func (h *handler) allInputs(w http.ResponseWriter, req *http.Request) { filtered = append(filtered, m) } - - w.Header().Set(contentType, applicationJSON) - serveJSON(w, filtered, requestedPretty) + return filtered } func serveJSON(w http.ResponseWriter, value any, pretty bool) { diff --git a/libbeat/monitoring/inputmon/input.go b/libbeat/monitoring/inputmon/input.go index 39d51d94cbe..7814f79234f 100644 --- a/libbeat/monitoring/inputmon/input.go +++ b/libbeat/monitoring/inputmon/input.go @@ -18,6 +18,7 @@ package inputmon import ( + "encoding/json" "strings" "github.com/google/uuid" @@ -78,3 +79,9 @@ func sanitizeID(id string) string { func globalRegistry() *monitoring.Registry { return monitoring.GetNamespace("dataset").GetRegistry() } + +// MetricSnapshotJSON returns a snapshot of the input metric values from the +// global 'dataset' monitoring namespace encoded as a JSON array (pretty formatted). 
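+//
+// For illustration (values taken from the unit test below), with a single
+// registered input metric the returned JSON is shaped like:
+//
+//	[
+//	  {
+//	    "foo_total": 100,
+//	    "id": "my-id",
+//	    "input": "test"
+//	  }
+//	]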
+func MetricSnapshotJSON() ([]byte, error) { + return json.MarshalIndent(filteredSnapshot(globalRegistry(), ""), "", " ") +} diff --git a/libbeat/monitoring/inputmon/input_test.go b/libbeat/monitoring/inputmon/input_test.go index 011113fb543..e74c463bee2 100644 --- a/libbeat/monitoring/inputmon/input_test.go +++ b/libbeat/monitoring/inputmon/input_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent-libs/monitoring" ) @@ -79,3 +80,27 @@ func TestNewInputMonitor(t *testing.T) { }) } } + +func TestMetricSnapshotJSON(t *testing.T) { + require.NoError(t, globalRegistry().Clear()) + t.Cleanup(func() { + require.NoError(t, globalRegistry().Clear()) + }) + + r, cancel := NewInputRegistry("test", "my-id", nil) + defer cancel() + monitoring.NewInt(r, "foo_total").Set(100) + + jsonBytes, err := MetricSnapshotJSON() + require.NoError(t, err) + + const expected = `[ + { + "foo_total": 100, + "id": "my-id", + "input": "test" + } +]` + + assert.Equal(t, expected, string(jsonBytes)) +} diff --git a/libbeat/processors/actions/decode_json_fields.go b/libbeat/processors/actions/decode_json_fields.go index 8dcc39e0dbc..b47ebd646d9 100644 --- a/libbeat/processors/actions/decode_json_fields.go +++ b/libbeat/processors/actions/decode_json_fields.go @@ -122,11 +122,7 @@ func (f *decodeJSONFields) Run(event *beat.Event) (*beat.Event, error) { if err != nil { f.logger.Debugf("Error trying to unmarshal %s", text) errs = append(errs, err.Error()) - event.SetErrorWithOption(mapstr.M{ - "message": "parsing input as JSON: " + err.Error(), - "data": text, - "field": field, - }, f.addErrorKey) + event.SetErrorWithOption(fmt.Sprintf("parsing input as JSON: %s", err.Error()), f.addErrorKey, text, field) continue } diff --git a/libbeat/processors/add_cloud_metadata/provider_aws_ec2_test.go b/libbeat/processors/add_cloud_metadata/provider_aws_ec2_test.go index a66ac00546c..92e3ae7ec2e 100644 --- a/libbeat/processors/add_cloud_metadata/provider_aws_ec2_test.go +++ b/libbeat/processors/add_cloud_metadata/provider_aws_ec2_test.go @@ -310,7 +310,7 @@ func TestRetrieveAWSMetadataEC2(t *testing.T) { Tags: []types.TagDescription{}, }, nil }, - processorOverwrite: false, + processorOverwrite: true, previousEvent: mapstr.M{ "cloud.provider": "aws", }, @@ -349,6 +349,7 @@ func TestRetrieveAWSMetadataEC2(t *testing.T) { config, err := conf.NewConfigFrom(map[string]interface{}{ "overwrite": tc.processorOverwrite, + "providers": []string{"aws"}, }) if err != nil { t.Fatalf("error creating config from map: %s", err.Error()) diff --git a/libbeat/reader/parser/parser_test.go b/libbeat/reader/parser/parser_test.go index 50b416a11d4..d49cf3f2fe0 100644 --- a/libbeat/reader/parser/parser_test.go +++ b/libbeat/reader/parser/parser_test.go @@ -367,6 +367,54 @@ func TestJSONParsersWithFields(t *testing.T) { }, }, }, + "JSON post processor with dotted target key": { + message: reader.Message{ + Content: []byte("{\"key\":\"value\"}"), + Fields: mapstr.M{}, + }, + config: map[string]interface{}{ + "parsers": []map[string]interface{}{ + map[string]interface{}{ + "ndjson": map[string]interface{}{ + "target": "kubernetes.audit", + }, + }, + }, + }, + expectedMessage: reader.Message{ + Content: []byte(""), + Fields: mapstr.M{ + "kubernetes": mapstr.M{ + "audit": mapstr.M{ + "key": "value", + }, + }, + }, + }, + }, + "JSON post processor with non-dotted target key": { + message: reader.Message{ + Content: []byte("{\"key\":\"value\"}"), + Fields: 
mapstr.M{}, + }, + config: map[string]interface{}{ + "parsers": []map[string]interface{}{ + map[string]interface{}{ + "ndjson": map[string]interface{}{ + "target": "kubernetes", + }, + }, + }, + }, + expectedMessage: reader.Message{ + Content: []byte(""), + Fields: mapstr.M{ + "kubernetes": mapstr.M{ + "key": "value", + }, + }, + }, + }, "JSON post processor with document ID": { message: reader.Message{ Content: []byte("{\"key\":\"value\", \"my-id-field\":\"my-id\"}"), diff --git a/heartbeat/monitors/syncpipeclient.go b/libbeat/reader/readfile/fs_metafields_other.go similarity index 57% rename from heartbeat/monitors/syncpipeclient.go rename to libbeat/reader/readfile/fs_metafields_other.go index 461d53293b2..425b7435fe8 100644 --- a/heartbeat/monitors/syncpipeclient.go +++ b/libbeat/reader/readfile/fs_metafields_other.go @@ -15,28 +15,33 @@ // specific language governing permissions and limitations // under the License. -package monitors +//go:build !windows -import "github.com/elastic/beats/v7/libbeat/beat" +package readfile -type SyncPipelineClientAdaptor struct { - C beat.Client -} +import ( + "fmt" + "os" -func (s SyncPipelineClientAdaptor) Publish(event beat.Event) error { - s.C.Publish(event) - return nil -} + "github.com/elastic/beats/v7/libbeat/common/file" + "github.com/elastic/elastic-agent-libs/mapstr" +) -func (s SyncPipelineClientAdaptor) PublishAll(events []beat.Event) error { - s.C.PublishAll(events) - return nil -} +const ( + deviceIDKey = "log.file.device_id" + inodeKey = "log.file.inode" +) -func (s SyncPipelineClientAdaptor) Close() error { - return s.C.Close() -} +func setFileSystemMetadata(fi os.FileInfo, fields mapstr.M) error { + osstate := file.GetOSState(fi) + _, err := fields.Put(deviceIDKey, osstate.Device) + if err != nil { + return fmt.Errorf("failed to set %q: %w", deviceIDKey, err) + } + _, err = fields.Put(inodeKey, osstate.Inode) + if err != nil { + return fmt.Errorf("failed to set %q: %w", inodeKey, err) + } -func (s SyncPipelineClientAdaptor) Wait() { - // intentionally blank, async pipelines should be empty + return nil } diff --git a/libbeat/reader/readfile/fs_metafields_windows.go b/libbeat/reader/readfile/fs_metafields_windows.go new file mode 100644 index 00000000000..113a74cf829 --- /dev/null +++ b/libbeat/reader/readfile/fs_metafields_windows.go @@ -0,0 +1,50 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
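+
+// Note: Windows has no device/inode pair, so the file identity written below
+// combines the volume serial number (vol) with the high and low parts of the
+// file index (idxhi/idxlo), mirroring log.file.device_id and log.file.inode
+// on POSIX systems.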
+ +package readfile + +import ( + "fmt" + "os" + + "github.com/elastic/beats/v7/libbeat/common/file" + "github.com/elastic/elastic-agent-libs/mapstr" +) + +const ( + idxhiKey = "log.file.idxhi" + idxloKey = "log.file.idxlo" + volKey = "log.file.vol" +) + +func setFileSystemMetadata(fi os.FileInfo, fields mapstr.M) error { + osstate := file.GetOSState(fi) + _, err := fields.Put(idxhiKey, osstate.IdxHi) + if err != nil { + return fmt.Errorf("failed to set %q: %w", idxhiKey, err) + } + _, err = fields.Put(idxloKey, osstate.IdxLo) + if err != nil { + return fmt.Errorf("failed to set %q: %w", idxloKey, err) + } + _, err = fields.Put(volKey, osstate.Vol) + if err != nil { + return fmt.Errorf("failed to set %q: %w", volKey, err) + } + + return nil +} diff --git a/libbeat/reader/readfile/metafields.go b/libbeat/reader/readfile/metafields.go index 5117bc6ec9b..c4c41e980f6 100644 --- a/libbeat/reader/readfile/metafields.go +++ b/libbeat/reader/readfile/metafields.go @@ -18,6 +18,9 @@ package readfile import ( + "fmt" + "os" + "github.com/elastic/beats/v7/libbeat/reader" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -25,15 +28,17 @@ import ( // Reader produces lines by reading lines from an io.Reader // through a decoder converting the reader it's encoding to utf-8. type FileMetaReader struct { - reader reader.Reader - path string - offset int64 + reader reader.Reader + path string + fi os.FileInfo + fingerprint string + offset int64 } // New creates a new Encode reader from input reader by applying // the given codec. -func NewFilemeta(r reader.Reader, path string, offset int64) reader.Reader { - return &FileMetaReader{r, path, offset} +func NewFilemeta(r reader.Reader, path string, fi os.FileInfo, fingerprint string, offset int64) reader.Reader { + return &FileMetaReader{r, path, fi, fingerprint, offset} } // Next reads the next line from it's initial io.Reader @@ -56,6 +61,17 @@ func (r *FileMetaReader) Next() (reader.Message, error) { }, }) + err = setFileSystemMetadata(r.fi, message.Fields) + if err != nil { + return message, fmt.Errorf("failed to set file system metadata: %w", err) + } + + if r.fingerprint != "" { + _, err = message.Fields.Put("log.file.fingerprint", r.fingerprint) + if err != nil { + return message, fmt.Errorf("failed to set fingerprint: %w", err) + } + } r.offset += int64(message.Bytes) return message, err diff --git a/libbeat/reader/readfile/metafields_other_test.go b/libbeat/reader/readfile/metafields_other_test.go new file mode 100644 index 00000000000..7874d24d4ae --- /dev/null +++ b/libbeat/reader/readfile/metafields_other_test.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
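+
+// With the file info and fingerprint threaded through above, NewFilemeta
+// gains two parameters. A minimal usage sketch (assuming a line reader
+// `lines` and an open *os.File `f`; these names are illustrative, not part
+// of this patch):
+//
+//	fi, err := f.Stat()
+//	if err != nil {
+//		return err
+//	}
+//	r := readfile.NewFilemeta(lines, f.Name(), fi, fingerprint, 0)
+//	msg, err := r.Next()
+//	// msg.Fields now carries log.file.path, log.offset, log.file.fingerprint
+//	// (when the fingerprint is non-empty), plus the per-OS keys:
+//	// log.file.device_id and log.file.inode on unix,
+//	// log.file.idxhi, log.file.idxlo and log.file.vol on Windows.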
+ +//go:build !windows + +package readfile + +import ( + "os" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent-libs/mapstr" +) + +func createTestFileInfo() os.FileInfo { + return testFileInfo{ + name: "filename", + size: 42, + time: time.Now(), + sys: &syscall.Stat_t{Dev: 17, Ino: 999}, + } +} + +func checkFields(t *testing.T, expected, actual mapstr.M) { + t.Helper() + + dev, err := actual.GetValue(deviceIDKey) + require.NoError(t, err) + require.Equal(t, uint64(17), dev) + err = actual.Delete(deviceIDKey) + require.NoError(t, err) + + inode, err := actual.GetValue(inodeKey) + require.NoError(t, err) + require.Equal(t, uint64(999), inode) + err = actual.Delete(inodeKey) + require.NoError(t, err) + + require.Equal(t, expected, actual) +} diff --git a/libbeat/reader/readfile/metafields_test.go b/libbeat/reader/readfile/metafields_test.go index 0bc4a2d72ec..e759368c268 100644 --- a/libbeat/reader/readfile/metafields_test.go +++ b/libbeat/reader/readfile/metafields_test.go @@ -18,8 +18,11 @@ package readfile import ( + "errors" "io" + "os" "testing" + "time" "github.com/stretchr/testify/require" @@ -27,19 +30,19 @@ import ( "github.com/elastic/elastic-agent-libs/mapstr" ) -func TestMetaFieldsOffset(t *testing.T) { +func TestMetaFields(t *testing.T) { messages := []reader.Message{ - reader.Message{ + { Content: []byte("my line"), Bytes: 7, Fields: mapstr.M{}, }, - reader.Message{ + { Content: []byte("my line again"), Bytes: 13, Fields: mapstr.M{}, }, - reader.Message{ + { Content: []byte(""), Bytes: 10, Fields: mapstr.M{}, @@ -48,10 +51,11 @@ func TestMetaFieldsOffset(t *testing.T) { path := "test/path" offset := int64(0) - in := &FileMetaReader{msgReader(messages), path, offset} + + in := &FileMetaReader{msgReader(messages), path, createTestFileInfo(), "hash", offset} for { msg, err := in.Next() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } @@ -60,15 +64,18 @@ func TestMetaFieldsOffset(t *testing.T) { expectedFields = mapstr.M{ "log": mapstr.M{ "file": mapstr.M{ - "path": path, + "path": path, + "fingerprint": "hash", }, "offset": offset, }, } + checkFields(t, expectedFields, msg.Fields) + } else { + require.Equal(t, expectedFields, msg.Fields) } offset += int64(msg.Bytes) - require.Equal(t, expectedFields, msg.Fields) require.Equal(t, offset, in.offset) } } @@ -96,3 +103,17 @@ func (r *messageReader) Next() (reader.Message, error) { func (r *messageReader) Close() error { return nil } + +type testFileInfo struct { + name string + size int64 + time time.Time + sys interface{} +} + +func (t testFileInfo) Name() string { return t.name } +func (t testFileInfo) Size() int64 { return t.size } +func (t testFileInfo) Mode() os.FileMode { return 0 } +func (t testFileInfo) ModTime() time.Time { return t.time } +func (t testFileInfo) IsDir() bool { return false } +func (t testFileInfo) Sys() interface{} { return t.sys } diff --git a/libbeat/reader/readfile/metafields_windows_test.go b/libbeat/reader/readfile/metafields_windows_test.go new file mode 100644 index 00000000000..37ff5cb4bda --- /dev/null +++ b/libbeat/reader/readfile/metafields_windows_test.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package readfile + +import ( + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent-libs/mapstr" +) + +type winTestInfo struct { + testFileInfo + idxhi uint32 + idxlo uint32 + vol uint32 +} + +func createTestFileInfo() os.FileInfo { + return &winTestInfo{ + testFileInfo: testFileInfo{ + name: "filename", + size: 42, + time: time.Now(), + }, + idxhi: 100, + idxlo: 200, + vol: 300, + } +} + +func checkFields(t *testing.T, expected, actual mapstr.M) { + t.Helper() + + idxhi, err := actual.GetValue(idxhiKey) + require.NoError(t, err) + require.Equal(t, uint64(100), idxhi) + err = actual.Delete(idxhiKey) + require.NoError(t, err) + + idxlo, err := actual.GetValue(idxloKey) + require.NoError(t, err) + require.Equal(t, uint64(200), idxlo) + err = actual.Delete(idxloKey) + require.NoError(t, err) + + vol, err := actual.GetValue(volKey) + require.NoError(t, err) + require.Equal(t, uint64(300), vol) + err = actual.Delete(volKey) + require.NoError(t, err) + + require.Equal(t, expected, actual) +} diff --git a/libbeat/reader/readjson/json.go b/libbeat/reader/readjson/json.go index f3690a79bb7..340503ec8f7 100644 --- a/libbeat/reader/readjson/json.go +++ b/libbeat/reader/readjson/json.go @@ -195,7 +195,9 @@ func (p *JSONParser) Next() (reader.Message, error) { message.Fields = event.Fields message.Meta = event.Meta } else { - message.AddFields(mapstr.M{p.target: jsonFields}) + fields := mapstr.M{} + fields.Put(p.target, jsonFields) + message.AddFields(fields) } return message, err diff --git a/libbeat/reader/syslog/rfc3164_test.go b/libbeat/reader/syslog/rfc3164_test.go index 2f14fd9e22b..d1c75fe574e 100644 --- a/libbeat/reader/syslog/rfc3164_test.go +++ b/libbeat/reader/syslog/rfc3164_test.go @@ -156,7 +156,7 @@ func TestParseRFC3164(t *testing.T) { hostname: "test-host", msg: "this is the message", }, - wantErr: `validation error at position 5: parsing time "24-08-2003T05:14:15-07:00" as "2006-01-02T15:04:05.999999999Z07:00": cannot parse "8-2003T05:14:15-07:00" as "2006"`, + wantErr: `validation error at position 5: parsing time "24-08-2003T05:14:15-07:00" as "2006-01-02T15:04:05.999999999Z07:00": cannot parse "24-08-2003T05:14:15-07:00" as "2006"`, }, "err-eof": { in: "<13>Oct 11 22:14:15 test-", diff --git a/libbeat/reader/syslog/rfc5424_test.go b/libbeat/reader/syslog/rfc5424_test.go index 539278a8c69..47cb9ee0e36 100644 --- a/libbeat/reader/syslog/rfc5424_test.go +++ b/libbeat/reader/syslog/rfc5424_test.go @@ -153,7 +153,7 @@ func TestParseRFC5424(t *testing.T) { msgID: "ID47", rawSDValue: `[exampleSDID@32473 iut="3" eventSource="Application" eventID="1011"][examplePriority@32473 class="high"]`, }, - wantErr: `validation error at position 8: parsing time "10-11-2003T22:14:15.003Z" as "2006-01-02T15:04:05.999999999Z07:00": cannot parse "1-2003T22:14:15.003Z" as "2006"`, + wantErr: "validation error at position 8: parsing time \"10-11-2003T22:14:15.003Z\" as 
\"2006-01-02T15:04:05.999999999Z07:00\": cannot parse \"10-11-2003T22:14:15.003Z\" as \"2006\"", }, "err-eof": { in: `<13>1 2003-08-24T05:14:15.000003-07:00 test-host su 1234 msg-`, diff --git a/libbeat/tests/compose/wrapper.go b/libbeat/tests/compose/wrapper.go index a0c40afd6de..0649488c621 100644 --- a/libbeat/tests/compose/wrapper.go +++ b/libbeat/tests/compose/wrapper.go @@ -36,7 +36,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/client" - "github.com/pkg/errors" "github.com/elastic/elastic-agent-autodiscover/docker" ) @@ -149,7 +148,11 @@ func (d *wrapperDriver) LockFile() string { } func (d *wrapperDriver) Close() error { - return errors.Wrap(d.client.Close(), "failed to close wrapper driver") + err := d.client.Close() + if err != nil { + return fmt.Errorf("failed to close wrapper driver: %w", err) + } + return nil } func (d *wrapperDriver) cmd(ctx context.Context, command string, arg ...string) *exec.Cmd { @@ -231,7 +234,7 @@ func writeToContainer(ctx context.Context, cli *client.Client, id, filename, con opts := types.CopyToContainerOptions{} err = cli.CopyToContainer(ctx, id, filepath.Dir(filename), bytes.NewReader(buf.Bytes()), opts) if err != nil { - return errors.Wrapf(err, "failed to copy environment to container %s", id) + return fmt.Errorf("failed to copy environment to container %s: %w", id, err) } return nil } diff --git a/libbeat/tests/integration/base_test.go b/libbeat/tests/integration/base_test.go new file mode 100644 index 00000000000..38bebb59f2c --- /dev/null +++ b/libbeat/tests/integration/base_test.go @@ -0,0 +1,209 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build integration + +package integration + +import ( + "runtime" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestBase(t *testing.T) { + cfg := ` +mockbeat: +name: +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.console: + code.json: + pretty: true +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(cfg) + mockbeat.Start() + mockbeat.WaitForLogs("mockbeat start running.", 60*time.Second) + mockbeat.Stop() + mockbeat.WaitForLogs("mockbeat stopped.", 30*time.Second) +} + +func TestSigHUP(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("sighup not supported on windows") + } + cfg := ` +mockbeat: +name: +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.console: + code.json: + pretty: true +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(cfg) + mockbeat.Start() + mockbeat.WaitForLogs("mockbeat start running.", 60*time.Second) + err := mockbeat.Process.Signal(syscall.SIGHUP) + require.NoErrorf(t, err, "error sending SIGHUP to mockbeat") + mockbeat.Stop() + mockbeat.WaitForLogs("mockbeat stopped.", 30*time.Second) +} + +func TestNoConfig(t *testing.T) { + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.Start() + procState, err := mockbeat.Process.Wait() + require.NoError(t, err, "error waiting for mockbeat to exit") + require.Equal(t, 1, procState.ExitCode(), "incorrect exit code") + mockbeat.WaitStdErrContains("error loading config file", 10*time.Second) +} + +func TestInvalidConfig(t *testing.T) { + cfg := ` +test: + test were + : invalid yml +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(cfg) + mockbeat.Start() + procState, err := mockbeat.Process.Wait() + require.NoError(t, err, "error waiting for mockbeat to exit") + require.Equal(t, 1, procState.ExitCode(), "incorrect exit code") + mockbeat.WaitStdErrContains("error loading config file", 10*time.Second) +} + +func TestInvalidCLI(t *testing.T) { + cfg := ` +mockbeat: +name: +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.console: + code.json: + pretty: true +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "-d", "config", "-E", "output.console=invalid") + mockbeat.WriteConfigFile(cfg) + mockbeat.Start() + procState, err := mockbeat.Process.Wait() + require.NoError(t, err, "error waiting for mockbeat to exit") + require.Equal(t, 1, procState.ExitCode(), "incorrect exit code") + mockbeat.WaitStdErrContains("error unpacking config data", 10*time.Second) +} + +func TestConsoleOutput(t *testing.T) { + cfg := ` +mockbeat: +name: +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.console: + code.json: + pretty: false +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "-e") + mockbeat.WriteConfigFile(cfg) + mockbeat.Start() + mockbeat.WaitStdErrContains("mockbeat start running.", 10*time.Second) + mockbeat.WaitStdOutContains("Mockbeat is alive", 10*time.Second) +} + +func TestConsoleBulkMaxSizeOutput(t *testing.T) { + cfg := ` +mockbeat: +name: +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.console: + code.json: + pretty: false + bulk_max_size: 1 +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "-e") + mockbeat.WriteConfigFile(cfg) + mockbeat.Start() + mockbeat.WaitStdErrContains("mockbeat start running.", 10*time.Second) + mockbeat.WaitStdOutContains("Mockbeat is alive", 
10*time.Second) +} + +func TestLoggingMetrics(t *testing.T) { + cfg := ` +mockbeat: +name: +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.console: + code.json: + pretty: true +logging: + metrics: + period: 0.1s +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "-e") + mockbeat.WriteConfigFile(cfg) + mockbeat.Start() + mockbeat.WaitStdErrContains("mockbeat start running.", 10*time.Second) + mockbeat.WaitStdErrContains("Non-zero metrics in the last", 10*time.Second) + mockbeat.Stop() + mockbeat.WaitStdErrContains("Total metrics", 10*time.Second) +} + +func TestPersistentUuid(t *testing.T) { + cfg := ` +mockbeat: +name: +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.console: + code.json: + pretty: true +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "-e") + mockbeat.WriteConfigFile(cfg) + mockbeat.Start() + mockbeat.WaitStdErrContains("mockbeat start running.", 10*time.Second) + + metaFile1, err := mockbeat.LoadMeta() + require.NoError(t, err, "error opening meta.json file") + mockbeat.Stop() + mockbeat.WaitStdErrContains("Beat ID: "+metaFile1.UUID.String(), 10*time.Second) + mockbeat.Start() + mockbeat.WaitStdErrContains("mockbeat start running.", 10*time.Second) + metaFile2, err := mockbeat.LoadMeta() + require.Equal(t, metaFile1.UUID.String(), metaFile2.UUID.String()) +} diff --git a/libbeat/tests/integration/ca_pinning_test.go b/libbeat/tests/integration/ca_pinning_test.go new file mode 100644 index 00000000000..51e098885ea --- /dev/null +++ b/libbeat/tests/integration/ca_pinning_test.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
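+
+// The ca_sha256 values below pin the test CA certificate. A sketch of how
+// such a pin can be regenerated, under the assumption that the pin is the
+// base64-encoded SHA-256 digest of the DER-encoded certificate (check the
+// tlscommon implementation in elastic-agent-libs before relying on this):
+//
+//	raw, err := os.ReadFile("testing/environments/docker/elasticsearch/pki/ca/ca.crt")
+//	if err != nil {
+//		return err
+//	}
+//	block, _ := pem.Decode(raw)
+//	sum := sha256.Sum256(block.Bytes)
+//	pin := base64.StdEncoding.EncodeToString(sum[:])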
+ +//go:build integration + +package integration + +import ( + "fmt" + "path/filepath" + "testing" + "time" +) + +func TestCAPinningGoodSHA(t *testing.T) { + EnsureESIsRunning(t) + esURL := GetESURL(t, "https") + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + caPath := filepath.Join(mockbeat.TempDir(), "../../../../", "testing", "environments", "docker", "elasticsearch", "pki", "ca", "ca.crt") + cfg := ` +mockbeat: +name: +logging: + level: debug +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.elasticsearch: + hosts: + - %s + username: admin + password: testing + allow_older_versions: true + ssl: + verification_mode: certificate + certificate_authorities: %s + ca_sha256: FDFOtqdUyXZw74YgvAJUC+I67ED1WfcI1qK44Qy2WQM= +` + mockbeat.WriteConfigFile(fmt.Sprintf(cfg, esURL.String(), caPath)) + mockbeat.Start() + mockbeat.WaitForLogs("mockbeat start running.", 60*time.Second) + mockbeat.WaitForLogs("PublishEvents: 1 events have been published", 60*time.Second) +} + +func TestCAPinningBadSHA(t *testing.T) { + EnsureESIsRunning(t) + esURL := GetESURL(t, "https") + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + caPath := filepath.Join(mockbeat.TempDir(), "../../../../", "testing", "environments", "docker", "elasticsearch", "pki", "ca", "ca.crt") + cfg := ` +mockbeat: +name: +logging: + level: debug +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.elasticsearch: + hosts: + - %s + username: admin + password: testing + allow_older_versions: true + ssl: + verification_mode: certificate + certificate_authorities: %s + ca_sha256: bad +` + mockbeat.WriteConfigFile(fmt.Sprintf(cfg, esURL.String(), caPath)) + mockbeat.Start() + mockbeat.WaitForLogs("mockbeat start running.", 60*time.Second) + mockbeat.WaitForLogs("provided CA certificate pins doesn't match any of the certificate authorities used to validate the certificate", 60*time.Second) +} diff --git a/libbeat/tests/integration/cmd_completion_test.go b/libbeat/tests/integration/cmd_completion_test.go new file mode 100644 index 00000000000..d75d3d6b4ab --- /dev/null +++ b/libbeat/tests/integration/cmd_completion_test.go @@ -0,0 +1,55 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build integration + +package integration + +import ( + "testing" + "time" +) + +func TestShellCompletion(t *testing.T) { + cfg := ` +mockbeat: +name: +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.console: + code.json: + pretty: true +` + tests := map[string]struct { + shell string + expected string + }{ + "bash completion": {shell: "bash", expected: "bash completion for mockbeat"}, + "zsh completion": {shell: "zsh", expected: "#compdef _mockbeat mockbeat"}, + "awesomeshell": {shell: "awesomeshell", expected: "Unknown shell awesomeshell"}, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "completion", tc.shell) + mockbeat.WriteConfigFile(cfg) + mockbeat.Start() + mockbeat.WaitStdOutContains(tc.expected, 10*time.Second) + }) + } +} diff --git a/libbeat/tests/integration/cmd_export_config_test.go b/libbeat/tests/integration/cmd_export_config_test.go new file mode 100644 index 00000000000..9d5b5b73590 --- /dev/null +++ b/libbeat/tests/integration/cmd_export_config_test.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build integration + +package integration + +import ( + "testing" + "time" +) + +func TestExportConfig(t *testing.T) { + cfg := ` +mockbeat: +name: +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.console: + code.json: + pretty: true +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "export", "config") + mockbeat.WriteConfigFile(cfg) + mockbeat.Start() + mockbeat.WaitStdOutContains("events: 4096", 10*time.Second) +} + +func TestExportConfigEnvVar(t *testing.T) { + cfg := ` +mockbeat: +name: ${GOOS} +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.console: + code.json: + pretty: true +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "export", "config") + mockbeat.WriteConfigFile(cfg) + mockbeat.Start() + mockbeat.WaitStdOutContains("name: ${GOOS}", 10*time.Second) +} diff --git a/libbeat/tests/integration/cmd_keystore_test.go b/libbeat/tests/integration/cmd_keystore_test.go new file mode 100644 index 00000000000..eb4b697cafa --- /dev/null +++ b/libbeat/tests/integration/cmd_keystore_test.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build integration + +package integration + +import ( + "fmt" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +var cfg = ` +mockbeat: +name: +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.console: + code.json: + pretty: true +keystore: + path: %s +` + +func TestKeystoreCreate(t *testing.T) { + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "keystore", "create") + keystorePath := filepath.Join(mockbeat.TempDir(), "test.keystore") + mockbeat.WriteConfigFile(fmt.Sprintf(cfg, keystorePath)) + mockbeat.Start() + mockbeat.WaitStdOutContains("Created mockbeat keystore", 10*time.Second) + require.FileExists(t, keystorePath) +} + +func TestKeystoreCreateForce(t *testing.T) { + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "keystore", "create", "--force") + keystorePath := filepath.Join(mockbeat.TempDir(), "test.keystore") + mockbeat.WriteConfigFile(fmt.Sprintf(cfg, keystorePath)) + mockbeat.Start() + mockbeat.WaitStdOutContains("Created mockbeat keystore", 10*time.Second) + mockbeat.Stop() + require.FileExists(t, keystorePath) + keystore1, err := os.ReadFile(keystorePath) + require.NoError(t, err) + + mockbeat.Start() + mockbeat.WaitStdOutContains("Created mockbeat keystore", 10*time.Second) + require.FileExists(t, keystorePath) + keystore2, err := os.ReadFile(keystorePath) + require.NoError(t, err) + require.NotEqual(t, keystore1, keystore2, "keystores should be different") +} + +func TestKeystoreRemoveNoKeyNoKeystore(t *testing.T) { + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "keystore", "remove", "mykey") + keystorePath := filepath.Join(mockbeat.TempDir(), "test.keystore") + mockbeat.WriteConfigFile(fmt.Sprintf(cfg, keystorePath)) + mockbeat.Start() + mockbeat.WaitStdErrContains("keystore doesn't exist.", 10*time.Second) +} + +func TestKeystoreRemoveNoExistingKey(t *testing.T) { + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + keystorePath := filepath.Join(mockbeat.TempDir(), "test.keystore") + mockbeat.WriteConfigFile(fmt.Sprintf(cfg, keystorePath)) + mockbeat.Start("keystore", "create") + mockbeat.WaitStdOutContains("Created mockbeat keystore", 10*time.Second) + mockbeat.Stop() + + mockbeat.Start("keystore", "remove", "mykey") + mockbeat.WaitStdErrContains("could not find key 'mykey' in the keystore", 10*time.Second) +} + +func TestKeystoreRemoveMultipleExistingKeys(t *testing.T) { + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + keystorePath := filepath.Join(mockbeat.TempDir(), "test.keystore") + mockbeat.WriteConfigFile(fmt.Sprintf(cfg, keystorePath)) + mockbeat.Start("keystore", "create") + mockbeat.WaitStdOutContains("Created mockbeat keystore", 10*time.Second) + mockbeat.Stop() + + mockbeat.Start("keystore", "add", "key1", "--stdin") + fmt.Fprintf(os.Stdin, "pass1") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + + mockbeat.Start("keystore", "add", "key2", "--stdin") + fmt.Fprintf(os.Stdin, "pass2") + procState, err = 
mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + + mockbeat.Start("keystore", "add", "key3", "--stdin") + fmt.Fprintf(os.Stdin, "pass3") + procState, err = mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + + mockbeat.Start("keystore", "remove", "key2", "key3") + procState, err = mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + + mockbeat.Start("keystore", "list") + procState, err = mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + mockbeat.WaitStdOutContains("key1", 10*time.Second) +} + +func TestKeystoreList(t *testing.T) { + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + keystorePath := filepath.Join(mockbeat.TempDir(), "test.keystore") + mockbeat.WriteConfigFile(fmt.Sprintf(cfg, keystorePath)) + mockbeat.Start("keystore", "create") + mockbeat.WaitStdOutContains("Created mockbeat keystore", 10*time.Second) + mockbeat.Stop() + + mockbeat.Start("keystore", "add", "key1", "--stdin") + fmt.Fprintf(os.Stdin, "pass1") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + + mockbeat.Start("keystore", "add", "key2", "--stdin") + fmt.Fprintf(os.Stdin, "pass2") + procState, err = mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + + mockbeat.Start("keystore", "add", "key3", "--stdin") + fmt.Fprintf(os.Stdin, "pass3") + procState, err = mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + + mockbeat.Start("keystore", "list") + procState, err = mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + + mockbeat.WaitStdOutContains("key1", 10*time.Second) + mockbeat.WaitStdOutContains("key2", 10*time.Second) + mockbeat.WaitStdOutContains("key3", 10*time.Second) +} + +func TestKeystoreListEmptyKeystore(t *testing.T) { + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + keystorePath := filepath.Join(mockbeat.TempDir(), "test.keystore") + mockbeat.WriteConfigFile(fmt.Sprintf(cfg, keystorePath)) + mockbeat.Start("keystore", "list") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") +} + +func TestKeystoreAddSecretFromStdin(t *testing.T) { + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + keystorePath := filepath.Join(mockbeat.TempDir(), "test.keystore") + mockbeat.WriteConfigFile(fmt.Sprintf(cfg, keystorePath)) + + mockbeat.Start("keystore", "create") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + + mockbeat.Start("keystore", "add", "key1", "--stdin") + fmt.Fprintf(os.Stdin, "pass1") + procState, err = mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") +} + +func TestKeystoreUpdateForce(t *testing.T) { + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + keystorePath := filepath.Join(mockbeat.TempDir(), "test.keystore") + mockbeat.WriteConfigFile(fmt.Sprintf(cfg, keystorePath)) + mockbeat.Start("keystore", "create") + procState, err := mockbeat.Process.Wait() + require.NoError(t, 
err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + + mockbeat.Start("keystore", "add", "key1", "--stdin") + fmt.Fprintf(os.Stdin, "pass1") + procState, err = mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + + mockbeat.Start("keystore", "add", "key1", "--force", "--stdin") + fmt.Fprintf(os.Stdin, "pass2") + procState, err = mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") +} diff --git a/libbeat/tests/integration/cmd_setup_index_management_test.go b/libbeat/tests/integration/cmd_setup_index_management_test.go new file mode 100644 index 00000000000..f099b692445 --- /dev/null +++ b/libbeat/tests/integration/cmd_setup_index_management_test.go @@ -0,0 +1,150 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build integration + +package integration + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +var IdxMgmtCfg = ` +mockbeat: +name: +logging: + level: debug +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +logging: + level: debug +output.elasticsearch: + hosts: + - %s + username: admin + password: testing + allow_older_versions: true + ` + +func TestSetupIdxMgmt(t *testing.T) { + EnsureESIsRunning(t) + esURL := GetESURL(t, "http") + dataStream := "mockbeat-9.9.9" + policy := "mockbeat" + t.Cleanup(func() { + dsURL, err := FormatDatastreamURL(t, esURL, dataStream) + require.NoError(t, err) + _, _, err = HttpDo(t, http.MethodDelete, dsURL) + require.NoError(t, err) + }) + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(fmt.Sprintf(IdxMgmtCfg, esURL.String())) + mockbeat.Start("setup", "--index-management", "-v", "-e") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + require.True(t, isTemplateLoaded(t, dataStream)) + require.True(t, isIndexPatternSet(t, "mockbeat-9.9.9")) + require.True(t, isPolicyCreated(t, policy)) +} + +func TestSetupTemplateDisabled(t *testing.T) { + EnsureESIsRunning(t) + dataStream := "mockbeat-9.9.9" + policy := "mockbeat" + esURL := GetESURL(t, "http") + t.Cleanup(func() { + dsURL, err := FormatDatastreamURL(t, esURL, dataStream) + require.NoError(t, err) + _, _, err = HttpDo(t, http.MethodDelete, dsURL) + require.NoError(t, err) + }) + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(fmt.Sprintf(IdxMgmtCfg, esURL.String())) + mockbeat.Start("setup", "--index-management", "-v", "-e") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit 
code") + require.True(t, isTemplateLoaded(t, dataStream)) + require.True(t, isIndexPatternSet(t, "mockbeat-9.9.9")) + require.True(t, isPolicyCreated(t, policy)) +} + +func isTemplateLoaded(t *testing.T, dataStream string) bool { + esURL := GetESURL(t, "http") + indexURL, err := FormatIndexTemplateURL(t, esURL, dataStream) + require.NoError(t, err) + status, body, err := HttpDo(t, http.MethodGet, indexURL) + require.NoError(t, err) + require.Equal(t, http.StatusOK, status, "incorrect status code") + + var r IndexTemplateResult + json.Unmarshal(body, &r) + for _, t := range r.IndexTemplates { + if t.Name == dataStream { + return true + } + } + return false +} + +func isIndexPatternSet(t *testing.T, dataStream string) bool { + esURL := GetESURL(t, "http") + indexURL, err := FormatIndexTemplateURL(t, esURL, dataStream) + require.NoError(t, err) + status, body, err := HttpDo(t, http.MethodGet, indexURL) + require.NoError(t, err) + require.Equal(t, http.StatusOK, status, "incorrect status code") + + var r IndexTemplateResult + json.Unmarshal(body, &r) + for _, t := range r.IndexTemplates { + if t.Name == dataStream { + for _, p := range t.IndexTemplate.IndexPatterns { + if p == dataStream { + return true + } + } + } + } + return false +} + +func isPolicyCreated(t *testing.T, policy string) bool { + esURL := GetESURL(t, "http") + policyURL, err := FormatPolicyURL(t, esURL, policy) + require.NoError(t, err) + status, body, err := HttpDo(t, http.MethodGet, policyURL) + require.NoError(t, err) + require.Equal(t, http.StatusOK, status, "incorrect status code") + + if !strings.Contains(string(body), "max_primary_shard_size\":\"50gb") { + return false + } + if !strings.Contains(string(body), "max_age\":\"30d") { + return false + } + return true +} diff --git a/libbeat/tests/integration/cmd_test.go b/libbeat/tests/integration/cmd_test.go new file mode 100644 index 00000000000..08953740014 --- /dev/null +++ b/libbeat/tests/integration/cmd_test.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build integration + +package integration + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +var CmdTestCfg = ` +mockbeat: +name: +logging: + level: debug +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.elasticsearch: + hosts: + - %s + username: admin + password: testing +` + +func TestCmdTest(t *testing.T) { + esURL := GetESURL(t, "http") + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(fmt.Sprintf(CmdTestCfg, esURL.String())) + mockbeat.Start("test", "config") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + mockbeat.WaitStdOutContains("Config OK", 10*time.Second) +} + +func TestCmdTestNoConfig(t *testing.T) { + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.Start("test", "config") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 1, procState.ExitCode(), "incorrect exit code") +} + +func TestCmdTestOutput(t *testing.T) { + esURL := GetESURL(t, "http") + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(fmt.Sprintf(CmdTestCfg, esURL.String())) + mockbeat.Start("test", "output") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + mockbeat.WaitStdOutContains("parse url... OK", 10*time.Second) + mockbeat.WaitStdOutContains("TLS... WARN secure connection disabled", 10*time.Second) + mockbeat.WaitStdOutContains("talk to server... OK", 10*time.Second) +} + +func TestCmdTestOutputBadHost(t *testing.T) { + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(fmt.Sprintf(CmdTestCfg, "badhost:9200")) + mockbeat.Start("test", "output") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 1, procState.ExitCode(), "incorrect exit code") + mockbeat.WaitStdOutContains("parse url... OK", 10*time.Second) + mockbeat.WaitStdOutContains("dns lookup... ERROR", 10*time.Second) +} diff --git a/libbeat/tests/integration/cmd_version_test.go b/libbeat/tests/integration/cmd_version_test.go new file mode 100644 index 00000000000..2c078f79c2f --- /dev/null +++ b/libbeat/tests/integration/cmd_version_test.go @@ -0,0 +1,52 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build integration + +package integration + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestCmdVersion(t *testing.T) { + cfg := ` +mockbeat: +name: +logging: + level: debug +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +output.console: + code.json: + pretty: true +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(cfg) + mockbeat.Start("version") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + mockbeat.WaitStdOutContains("mockbeat", 10*time.Second) + mockbeat.WaitStdOutContains("version", 10*time.Second) + mockbeat.WaitStdOutContains("9.9.9", 10*time.Second) +} diff --git a/libbeat/tests/integration/dashboard_test.go b/libbeat/tests/integration/dashboard_test.go new file mode 100644 index 00000000000..9eaf9fa5c1c --- /dev/null +++ b/libbeat/tests/integration/dashboard_test.go @@ -0,0 +1,213 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build integration + +package integration + +import ( + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestDashboardLoadSkip(t *testing.T) { + cfg := ` +mockbeat: +name: +logging: + level: debug +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(cfg) + kURL, _ := GetKibana(t) + esURL := GetESURL(t, "http") + mockbeat.Start("setup", + "--dashboards", + "-E", "setup.dashboards.file="+filepath.Join("./testdata", "testbeat-no-dashboards.zip"), + "-E", "setup.dashboards.beat=testbeat", + "-E", "setup.kibana.protocol=http", + "-E", "setup.kibana.host="+kURL.Hostname(), + "-E", "setup.kibana.port="+kURL.Port(), + "-E", "setup.kibana.username=beats", + "-E", "setup.kibana.password=testing", + "-E", "output.elasticsearch.hosts=['"+esURL.String()+"']", + "-E", "output.elasticsearch.username=admin", + "-E", "output.elasticsearch.password=testing", + "-E", "output.file.enabled=false") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + mockbeat.WaitStdOutContains("Skipping loading dashboards", 10*time.Second) +} + +func TestDashboardLoad(t *testing.T) { + cfg := ` +mockbeat: +name: +logging: + level: debug +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(cfg) + kURL, _ := GetKibana(t) + esURL := GetESURL(t, "http") + mockbeat.Start("setup", + "--dashboards", + "-E", "setup.dashboards.file="+filepath.Join("./testdata", "testbeat-dashboards.zip"), + "-E", "setup.dashboards.beat=testbeat", + "-E", "setup.kibana.protocol=http", + "-E", "setup.kibana.host="+kURL.Hostname(), + "-E", "setup.kibana.port="+kURL.Port(), + "-E", "setup.kibana.username=beats", + "-E", "setup.kibana.password=testing", + "-E", "output.elasticsearch.hosts=['"+esURL.String()+"']", + "-E", "output.elasticsearch.username=admin", + "-E", "output.elasticsearch.password=testing", + "-E", "output.file.enabled=false") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + mockbeat.WaitForLogs("Kibana dashboards successfully loaded", 30*time.Second) +} + +func TestDashboardLoadIndexOnly(t *testing.T) { + cfg := ` +mockbeat: +name: +logging: + level: debug +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(cfg) + kURL, _ := GetKibana(t) + esURL := GetESURL(t, "http") + mockbeat.Start("setup", + "--dashboards", + "-E", "setup.dashboards.file="+filepath.Join("./testdata", "testbeat-dashboards.zip"), + "-E", "setup.dashboards.beat=testbeat", + "-E", "setup.dashboards.only_index=true", + "-E", "setup.kibana.protocol=http", + "-E", "setup.kibana.host="+kURL.Hostname(), + "-E", "setup.kibana.port="+kURL.Port(), + "-E", "setup.kibana.username=beats", + "-E", "setup.kibana.password=testing", + "-E", "output.elasticsearch.hosts=['"+esURL.String()+"']", + "-E", "output.elasticsearch.username=admin", + "-E", "output.elasticsearch.password=testing", + "-E", "output.file.enabled=false") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + mockbeat.WaitForLogs("Kibana dashboards successfully loaded", 30*time.Second) +} + 
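+// Each test above repeats the same block of -E setup flags; a shared helper
+// along these lines (hypothetical, not part of this file) would keep the
+// test bodies focused on what actually differs:
+//
+//	func kibanaSetupArgs(kURL, esURL url.URL) []string {
+//		return []string{
+//			"-E", "setup.kibana.protocol=http",
+//			"-E", "setup.kibana.host=" + kURL.Hostname(),
+//			"-E", "setup.kibana.port=" + kURL.Port(),
+//			"-E", "setup.kibana.username=beats",
+//			"-E", "setup.kibana.password=testing",
+//			"-E", "output.elasticsearch.hosts=['" + esURL.String() + "']",
+//			"-E", "output.elasticsearch.username=admin",
+//			"-E", "output.elasticsearch.password=testing",
+//			"-E", "output.file.enabled=false",
+//		}
+//	}
+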
+func TestDashboardExportById(t *testing.T) { + cfg := ` +mockbeat: +name: +logging: + level: debug +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(cfg) + kURL, _ := GetKibana(t) + esURL := GetESURL(t, "http") + mockbeat.Start("setup", + "--dashboards", + "-E", "setup.dashboards.file="+filepath.Join("./testdata", "testbeat-dashboards.zip"), + "-E", "setup.dashboards.beat=testbeat", + "-E", "setup.dashboards.only_index=true", + "-E", "setup.kibana.protocol=http", + "-E", "setup.kibana.host="+kURL.Hostname(), + "-E", "setup.kibana.port="+kURL.Port(), + "-E", "setup.kibana.username=beats", + "-E", "setup.kibana.password=testing", + "-E", "output.elasticsearch.hosts=['"+esURL.String()+"']", + "-E", "output.elasticsearch.username=admin", + "-E", "output.elasticsearch.password=testing", + "-E", "output.file.enabled=false") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + mockbeat.WaitForLogs("Kibana dashboards successfully loaded", 30*time.Second) + + mockbeat.Start("export", + "dashboard", + "-E", "setup.kibana.protocol=http", + "-E", "setup.kibana.host="+kURL.Hostname(), + "-E", "setup.kibana.port="+kURL.Port(), + "-E", "setup.kibana.username=beats", + "-E", "setup.kibana.password=testing", + "-id", "Metricbeat-system-overview", + "-folder", filepath.Join(mockbeat.TempDir(), "system-overview")) + procState, err = mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + dbPath := filepath.Join(mockbeat.TempDir(), "system-overview", "_meta", "kibana", "8", "dashboard", "Metricbeat-system-overview.json") + require.FileExists(t, dbPath, "dashboard file not exported") + b, err := os.ReadFile(dbPath) + require.NoError(t, err) + require.Contains(t, string(b), "Metricbeat-system-overview") +} + +func TestDashboardExportByUnknownId(t *testing.T) { + cfg := ` +mockbeat: +name: +logging: + level: debug +queue.mem: + events: 4096 + flush.min_events: 8 + flush.timeout: 0.1s +` + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(cfg) + kURL, _ := GetKibana(t) + mockbeat.Start("export", + "dashboard", + "-E", "setup.kibana.protocol=http", + "-E", "setup.kibana.host="+kURL.Hostname(), + "-E", "setup.kibana.port="+kURL.Port(), + "-E", "setup.kibana.username=beats", + "-E", "setup.kibana.password=testing", + "-id", "No-such-dashboard", + "-folder", filepath.Join(mockbeat.TempDir(), "system-overview")) + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 1, procState.ExitCode(), "incorrect exit code") +} diff --git a/libbeat/tests/integration/framework.go b/libbeat/tests/integration/framework.go new file mode 100644 index 00000000000..0c91b763257 --- /dev/null +++ b/libbeat/tests/integration/framework.go @@ -0,0 +1,566 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build integration + +package integration + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + "testing" + "time" + + "github.com/gofrs/uuid" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/libbeat/common/atomic" +) + +type BeatProc struct { + Args []string + baseArgs []string + Binary string + RestartOnBeatOnExit bool + beatName string + cmdMutex sync.Mutex + configFile string + fullPath string + logFileOffset int64 + t *testing.T + tempDir string + stdout *os.File + stderr *os.File + Process *os.Process +} + +type Meta struct { + UUID uuid.UUID `json:"uuid"` + FirstStart time.Time `json:"first_start"` +} + +type IndexTemplateResult struct { + IndexTemplates []IndexTemplateEntry `json:"index_templates"` +} + +type IndexTemplateEntry struct { + Name string `json:"name"` + IndexTemplate IndexTemplate `json:"index_template"` +} + +type IndexTemplate struct { + IndexPatterns []string `json:"index_patterns"` +} + +type SearchResult struct { + Hits Hits `json:"hits"` +} + +type Hits struct { + Total Total `json:"total"` +} + +type Total struct { + Value int `json:"value"` +} + +// NewBeat createa a new Beat process from the system tests binary. +// It sets some required options like the home path, logging, etc. +// `tempDir` will be used as home and logs directory for the Beat +// `args` will be passed as CLI arguments to the Beat +func NewBeat(t *testing.T, beatName, binary string, args ...string) *BeatProc { + require.FileExistsf(t, binary, "beat binary must exists") + tempDir := createTempDir(t) + configFile := filepath.Join(tempDir, beatName+".yml") + stdoutFile, err := os.Create(filepath.Join(tempDir, "stdout")) + require.NoError(t, err, "error creating stdout file") + stderrFile, err := os.Create(filepath.Join(tempDir, "stderr")) + require.NoError(t, err, "error creating stderr file") + p := BeatProc{ + Binary: binary, + baseArgs: append([]string{ + beatName, + "--systemTest", + "--path.home", tempDir, + "--path.logs", tempDir, + "-E", "logging.to_files=true", + "-E", "logging.files.rotateeverybytes=104857600", // About 100MB + }, args...), + tempDir: tempDir, + beatName: beatName, + configFile: configFile, + t: t, + stdout: stdoutFile, + stderr: stderrFile, + } + return &p +} + +// Start starts the Beat process +// args are extra arguments to be passed to the Beat. +func (b *BeatProc) Start(args ...string) { + t := b.t + fullPath, err := filepath.Abs(b.Binary) + if err != nil { + t.Fatalf("could not get full path from %q, err: %s", b.Binary, err) + } + + b.fullPath = fullPath + b.Args = append(b.baseArgs, args...) + + done := atomic.MakeBool(false) + wg := sync.WaitGroup{} + if b.RestartOnBeatOnExit { + wg.Add(1) + go func() { + defer wg.Done() + for !done.Load() { + b.startBeat() + b.waitBeatToExit() + } + }() + } else { + b.startBeat() + } + + t.Cleanup(func() { + b.cmdMutex.Lock() + // 1. 
Kill the Beat + if err := b.Process.Signal(os.Interrupt); err != nil { + if !errors.Is(err, os.ErrProcessDone) { + t.Fatalf("could not stop process with PID: %d, err: %s", + b.Process.Pid, err) + } + } + + // Make sure the goroutine restarting the Beat has exited + if b.RestartOnBeatOnExit { + // 2. Set the done flag so the goroutine loop can exit + done.Store(true) + // 3. Release the mutex, keeping it locked + // until now ensures a new process won't + // start. Lock must be released before + // wg.Wait() or there is a possibility of + // deadlock. + b.cmdMutex.Unlock() + // 4. Wait for the goroutine to finish, this helps ensuring + // no other Beat process was started + wg.Wait() + } else { + b.cmdMutex.Unlock() + } + }) +} + +// startBeat starts the Beat process. This method +// does not block nor waits the Beat to finish. +func (b *BeatProc) startBeat() { + b.cmdMutex.Lock() + defer b.cmdMutex.Unlock() + b.stdout.Seek(0, 0) + b.stdout.Truncate(0) + b.stderr.Seek(0, 0) + b.stderr.Truncate(0) + var procAttr os.ProcAttr + procAttr.Files = []*os.File{os.Stdin, b.stdout, b.stderr} + process, err := os.StartProcess(b.fullPath, b.Args, &procAttr) + require.NoError(b.t, err, "error starting beat process") + b.Process = process +} + +// waitBeatToExit blocks until the Beat exits, it returns +// the process' exit code. +// `startBeat` must be called before this method. +func (b *BeatProc) waitBeatToExit() int { + processState, err := b.Process.Wait() + if err != nil { + b.t.Fatalf("error waiting for %q to finish: %s. Exit code: %d", + b.beatName, err, processState.ExitCode()) + } + + return processState.ExitCode() +} + +// Stop stops the Beat process +// Start adds Cleanup function to stop when test ends, only run this if you want to inspect logs after beat shutsdown +func (b *BeatProc) Stop() { + b.cmdMutex.Lock() + defer b.cmdMutex.Unlock() + if err := b.Process.Signal(os.Interrupt); err != nil { + if errors.Is(err, os.ErrProcessDone) { + return + } + b.t.Fatalf("could not stop process with PID: %d, err: %s", b.Process.Pid, err) + } +} + +// LogContains looks for `s` as a substring of every log line, +// it will open the log file on every call, read it until EOF, +// then close it. +func (b *BeatProc) LogContains(s string) bool { + t := b.t + logFile := b.openLogFile() + _, err := logFile.Seek(b.logFileOffset, os.SEEK_SET) + if err != nil { + t.Fatalf("could not set offset for '%s': %s", logFile.Name(), err) + } + + defer func() { + if err := logFile.Close(); err != nil { + // That's not quite a test error, but it can impact + // next executions of LogContains, so treat it as an error + t.Errorf("could not close log file: %s", err) + } + }() + + r := bufio.NewReader(logFile) + for { + data, err := r.ReadBytes('\n') + line := string(data) + b.logFileOffset += int64(len(line)) + + if err != nil { + if err != io.EOF { + t.Fatalf("error reading log file '%s': %s", logFile.Name(), err) + } + break + } + + if strings.Contains(line, s) { + return true + } + } + + return false +} + +// WaitForLogs waits for the specified string s to be present in the logs within +// the given timeout duration and fails the test if s is not found. +// msgAndArgs should be a format string and arguments that will be printed +// if the logs are not found, providing additional context for debugging. +func (b *BeatProc) WaitForLogs(s string, timeout time.Duration, msgAndArgs ...any) { + b.t.Helper() + require.Eventually(b.t, func() bool { + return b.LogContains(s) + }, timeout, 100*time.Millisecond, msgAndArgs...) 
+
+// TempDir returns the temporary directory used by that Beat.
+// On a successful test the directory is automatically removed;
+// on failure it is kept.
+func (b *BeatProc) TempDir() string {
+	return b.tempDir
+}
+
+// WriteConfigFile writes the provided configuration string cfg to a file.
+// This file will be used as the configuration file for the Beat.
+func (b *BeatProc) WriteConfigFile(cfg string) {
+	if err := os.WriteFile(b.configFile, []byte(cfg), 0o644); err != nil {
+		b.t.Fatalf("cannot create config file '%s': %s", b.configFile, err)
+	}
+
+	b.Args = append(b.Args, "-c", b.configFile)
+	b.baseArgs = append(b.baseArgs, "-c", b.configFile)
+}
+
+// openLogFile opens the log file for reading and returns it.
+// The caller is responsible for closing it; LogContains, for
+// example, closes the file after every read.
+func (b *BeatProc) openLogFile() *os.File {
+	t := b.t
+	glob := fmt.Sprintf("%s-*.ndjson", filepath.Join(b.tempDir, b.beatName))
+	files, err := filepath.Glob(glob)
+	if err != nil {
+		t.Fatalf("could not expand log file glob: %s", err)
+	}
+
+	require.Eventually(t, func() bool {
+		files, err = filepath.Glob(glob)
+		if err != nil {
+			t.Fatalf("could not expand log file glob: %s", err)
+		}
+		return len(files) == 1
+	}, 5*time.Second, 100*time.Millisecond,
+		"waiting for log file matching glob '%s' to be created", glob)
+
+	// During normal operation there must be a single log file; if there
+	// is more than one, the Beat is logging too much, which is reason
+	// enough to stop the test
+	if len(files) != 1 {
+		t.Fatalf("there must be only one log file for %s, found: %d",
+			glob, len(files))
+	}
+
+	f, err := os.Open(files[0])
+	if err != nil {
+		t.Fatalf("could not open log file '%s': %s", files[0], err)
+	}
+
+	return f
+}
+
+// createTempDir creates a temporary directory that will be
+// removed after the test passes.
+//
+// If the test fails, the temporary directory is not removed.
+//
+// If the tests are run with -v, the temporary directory will
+// be logged.
+func createTempDir(t *testing.T) string {
+	tempDir, err := filepath.Abs(filepath.Join("../../build/integration-tests/",
+		fmt.Sprintf("%s-%d", t.Name(), time.Now().Unix())))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := os.MkdirAll(tempDir, 0o766); err != nil {
+		t.Fatalf("cannot create tmp dir %q: %s", tempDir, err)
+	}
+
+	cleanup := func() {
+		if !t.Failed() {
+			if err := os.RemoveAll(tempDir); err != nil {
+				t.Errorf("could not remove temp dir '%s': %s", tempDir, err)
+			}
+		} else {
+			t.Logf("Temporary directory saved: %s", tempDir)
+		}
+	}
+	t.Cleanup(cleanup)
+
+	return tempDir
+}
+
+// EnsureESIsRunning ensures Elasticsearch is running and is reachable
+// using the default test credentials or the corresponding environment
+// variables.
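+// For example, to point the tests at a non-default cluster, export the
+// variables read by GetESURL below before running the tests (the values
+// shown here are illustrative):
+//
+//	ES_HOST=127.0.0.1 ES_PORT=9200 ES_USER=admin ES_PASS=testing \
+//		go test -tags integration ./libbeat/tests/integration/...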
+func EnsureESIsRunning(t *testing.T) {
+	esURL := GetESURL(t, "http")
+
+	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(500*time.Second))
+	defer cancel()
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, esURL.String(), nil)
+	if err != nil {
+		t.Fatalf("cannot create request to ensure ES is running: %s", err)
+	}
+
+	u := esURL.User.Username()
+	p, _ := esURL.User.Password()
+	req.SetBasicAuth(u, p)
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		// If you're reading this message, you probably forgot to start ES:
+		// run `mage compose:Up` from Filebeat's folder to start all
+		// containers required for integration tests
+		t.Fatalf("cannot execute HTTP request to ES: '%s', check to make sure ES is running (mage compose:Up)", err)
+	}
+	if resp.StatusCode != http.StatusOK {
+		t.Errorf("unexpected HTTP status: %d, expecting 200 - OK", resp.StatusCode)
+	}
+}
+
+// FileContains reads filename line by line and returns the first line
+// containing match, or an empty string if there is no such line.
+func (b *BeatProc) FileContains(filename string, match string) string {
+	file, err := os.Open(filename)
+	require.NoErrorf(b.t, err, "error opening: %s", filename)
+	defer file.Close()
+	r := bufio.NewReader(file)
+	for {
+		line, err := r.ReadString('\n')
+		if err != nil {
+			if err != io.EOF {
+				b.t.Fatalf("error reading log file '%s': %s", file.Name(), err)
+			}
+			break
+		}
+		if strings.Contains(line, match) {
+			return line
+		}
+	}
+	return ""
+}
+
+// WaitFileContains waits up to waitFor for match to show up in filename
+// and returns the matching line, failing the test on timeout.
+func (b *BeatProc) WaitFileContains(filename string, match string, waitFor time.Duration) string {
+	var returnValue string
+	require.Eventuallyf(b.t,
+		func() bool {
+			returnValue = b.FileContains(filename, match)
+			return returnValue != ""
+		}, waitFor, 100*time.Millisecond, "match string '%s' not found in %s", match, filename)
+
+	return returnValue
+}
+
+// WaitStdErrContains waits up to waitFor for match to show up on stderr.
+func (b *BeatProc) WaitStdErrContains(match string, waitFor time.Duration) string {
+	return b.WaitFileContains(b.stderr.Name(), match, waitFor)
+}
+
+// WaitStdOutContains waits up to waitFor for match to show up on stdout.
+func (b *BeatProc) WaitStdOutContains(match string, waitFor time.Duration) string {
+	return b.WaitFileContains(b.stdout.Name(), match, waitFor)
+}
+
+// LoadMeta reads the Beat's meta.json file from its data directory.
+func (b *BeatProc) LoadMeta() (Meta, error) {
+	m := Meta{}
+	metaFile, err := os.Open(filepath.Join(b.TempDir(), "data", "meta.json"))
+	if err != nil {
+		return m, err
+	}
+	defer metaFile.Close()
+
+	metaBytes, err := io.ReadAll(metaFile)
+	require.NoError(b.t, err, "error reading meta file")
+	err = json.Unmarshal(metaBytes, &m)
+	require.NoError(b.t, err, "error unmarshalling meta data")
+	return m, nil
+}
+
+// GetESURL returns the Elasticsearch URL for the given scheme, built
+// from the ES_HOST, ES_PORT, ES_USER and ES_PASS environment variables
+// or their defaults.
+func GetESURL(t *testing.T, scheme string) url.URL {
+	t.Helper()
+
+	esHost := os.Getenv("ES_HOST")
+	if esHost == "" {
+		esHost = "localhost"
+	}
+
+	esPort := os.Getenv("ES_PORT")
+	if esPort == "" {
+		switch scheme {
+		case "http":
+			esPort = "9200"
+		case "https":
+			esPort = "9201"
+		default:
+			t.Fatalf("could not determine default port for scheme: %q", scheme)
+		}
+	}
+
+	user := os.Getenv("ES_USER")
+	if user == "" {
+		user = "admin"
+	}
+
+	pass := os.Getenv("ES_PASS")
+	if pass == "" {
+		pass = "testing"
+	}
+
+	esURL := url.URL{
+		Scheme: scheme,
+		Host:   fmt.Sprintf("%s:%s", esHost, esPort),
+		User:   url.UserPassword(user, pass),
+	}
+	return esURL
+}
+
+// GetKibana returns the Kibana URL built from the KIBANA_HOST and
+// KIBANA_PORT environment variables (or their defaults), together
+// with the default test credentials.
+func GetKibana(t *testing.T) (url.URL, *url.Userinfo) {
+	t.Helper()
+
+	kibanaHost := os.Getenv("KIBANA_HOST")
+	if kibanaHost == "" {
+		kibanaHost = "localhost"
+	}
+
+	kibanaPort := os.Getenv("KIBANA_PORT")
+	if kibanaPort == "" {
+		kibanaPort = "5601"
+	}
+
+	kibanaURL := url.URL{
+		Scheme: "http",
+		Host:   fmt.Sprintf("%s:%s", kibanaHost, kibanaPort),
+	}
+	kibanaUser := url.UserPassword("beats", "testing")
+	return kibanaURL, kibanaUser
+}
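+
+// HttpDo issues an HTTP request with the given method against targetURL
+// and returns the status code and the full response body; the body is
+// always closed before returning.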
+func HttpDo(t *testing.T, method string, targetURL url.URL) (statusCode int, body []byte, err error) {
+	t.Helper()
+	client := &http.Client{}
+	req, err := http.NewRequest(method, targetURL.String(), nil)
+	if err != nil {
+		return 0, nil, fmt.Errorf("error making request, method: %s, url: %s, error: %w", method, targetURL.String(), err)
+	}
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return 0, nil, fmt.Errorf("error doing request, method: %s, url: %s, error: %w", method, targetURL.String(), err)
+	}
+	defer resp.Body.Close()
+	body, err = io.ReadAll(resp.Body)
+
+	if err != nil {
+		return resp.StatusCode, nil, fmt.Errorf("error reading response body, method: %s, url: %s, status code: %d, error: %w", method, targetURL.String(), resp.StatusCode, err)
+	}
+	return resp.StatusCode, body, nil
+}
+
+func FormatDatastreamURL(t *testing.T, srcURL url.URL, dataStream string) (url.URL, error) {
+	t.Helper()
+	path, err := url.JoinPath("/_data_stream", dataStream)
+	if err != nil {
+		return url.URL{}, fmt.Errorf("error joining data_stream path: %w", err)
+	}
+	srcURL.Path = path
+	return srcURL, nil
+}
+
+func FormatIndexTemplateURL(t *testing.T, srcURL url.URL, template string) (url.URL, error) {
+	t.Helper()
+	path, err := url.JoinPath("/_index_template", template)
+	if err != nil {
+		return url.URL{}, fmt.Errorf("error joining index_template path: %w", err)
+	}
+	srcURL.Path = path
+	return srcURL, nil
+}
+
+func FormatPolicyURL(t *testing.T, srcURL url.URL, policy string) (url.URL, error) {
+	t.Helper()
+	path, err := url.JoinPath("/_ilm/policy", policy)
+	if err != nil {
+		return url.URL{}, fmt.Errorf("error joining ilm policy path: %w", err)
+	}
+	srcURL.Path = path
+	return srcURL, nil
+}
+
+func FormatRefreshURL(t *testing.T, srcURL url.URL) url.URL {
+	t.Helper()
+	srcURL.Path = "/_refresh"
+	return srcURL
+}
+
+func FormatDataStreamSearchURL(t *testing.T, srcURL url.URL, dataStream string) (url.URL, error) {
+	t.Helper()
+	path, err := url.JoinPath("/", dataStream, "_search")
+	if err != nil {
+		return url.URL{}, fmt.Errorf("error joining data stream search path: %w", err)
+	}
+	srcURL.Path = path
+	return srcURL, nil
+}
diff --git a/libbeat/tests/integration/http_test.go b/libbeat/tests/integration/http_test.go
new file mode 100644
index 00000000000..bb2f7bde924
--- /dev/null
+++ b/libbeat/tests/integration/http_test.go
@@ -0,0 +1,150 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build integration
+
+package integration
+
+import (
+	"encoding/json"
+	"io"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+type Stats struct {
+	Libbeat Libbeat `json:"libbeat"`
+}
+
+type Libbeat struct {
+	Config Config `json:"config"`
+}
+
+type Config struct {
+	Scans int `json:"scans"`
+}
+
+func TestHttpRoot(t *testing.T) {
+	cfg := `
+mockbeat:
+name:
+queue.mem:
+  events: 4096
+  flush.min_events: 8
+  flush.timeout: 0.1s
+output.console:
+  code.json:
+    pretty: false
+`
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "-E", "http.enabled=true")
+	mockbeat.WriteConfigFile(cfg)
+	mockbeat.Start()
+	mockbeat.WaitForLogs("Starting stats endpoint", 60*time.Second)
+
+	r, err := http.Get("http://localhost:5066")
+	require.NoError(t, err)
+	require.Equal(t, http.StatusOK, r.StatusCode, "incorrect status code")
+
+	body, err := io.ReadAll(r.Body)
+	require.NoError(t, err)
+	var m map[string]interface{}
+	err = json.Unmarshal(body, &m)
+
+	require.NoError(t, err)
+	require.Equal(t, "mockbeat", m["beat"])
+	require.Equal(t, "9.9.9", m["version"])
+}
+
+func TestHttpStats(t *testing.T) {
+	cfg := `
+mockbeat:
+name:
+queue.mem:
+  events: 4096
+  flush.min_events: 8
+  flush.timeout: 0.1s
+output.console:
+  code.json:
+    pretty: false
+`
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "-E", "http.enabled=true")
+	mockbeat.WriteConfigFile(cfg)
+	mockbeat.Start()
+	mockbeat.WaitForLogs("Starting stats endpoint", 60*time.Second)
+
+	r, err := http.Get("http://localhost:5066/stats")
+	require.NoError(t, err)
+	require.Equal(t, http.StatusOK, r.StatusCode, "incorrect status code")
+
+	body, err := io.ReadAll(r.Body)
+	require.NoError(t, err)
+	var m Stats
+
+	// Initialize Scans to 1 so that the 0 asserted below can only have
+	// come from the response body, not from the struct's zero value
+	m.Libbeat.Config.Scans = 1
+	err = json.Unmarshal(body, &m)
+
+	require.NoError(t, err)
+	require.Equal(t, 0, m.Libbeat.Config.Scans)
+}
+
+func TestHttpError(t *testing.T) {
+	cfg := `
+mockbeat:
+name:
+queue.mem:
+  events: 4096
+  flush.min_events: 8
+  flush.timeout: 0.1s
+output.console:
+  code.json:
+    pretty: false
+`
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "-E", "http.enabled=true")
+	mockbeat.WriteConfigFile(cfg)
+	mockbeat.Start()
+	mockbeat.WaitForLogs("Starting stats endpoint", 60*time.Second)
+
+	r, err := http.Get("http://localhost:5066/not-exist")
+	require.NoError(t, err)
+	require.Equal(t, http.StatusNotFound, r.StatusCode, "incorrect status code")
+}
+
+func TestHttpPProfDisabled(t *testing.T) {
+	cfg := `
+mockbeat:
+name:
+queue.mem:
+  events: 4096
+  flush.min_events: 8
+  flush.timeout: 0.1s
+output.console:
+  code.json:
+    pretty: false
+`
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "-E", "http.enabled=true")
+	mockbeat.WriteConfigFile(cfg)
+	mockbeat.Start()
+	mockbeat.WaitForLogs("Starting stats endpoint", 60*time.Second)
+
+	r, err := http.Get("http://localhost:5066/debug/pprof/")
+	require.NoError(t, err)
+	require.Equal(t, http.StatusNotFound, r.StatusCode, "incorrect status code")
+}
diff --git a/libbeat/tests/integration/logging_test.go b/libbeat/tests/integration/logging_test.go
new file mode 100644
index 00000000000..3e7b6fdb83a
--- /dev/null
+++ b/libbeat/tests/integration/logging_test.go
@@ -0,0 +1,77 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build integration
+
+package integration
+
+import (
+	"encoding/json"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestLoggingConsoleECS(t *testing.T) {
+	cfg := `
+mockbeat:
+name:
+queue.mem:
+  events: 4096
+  flush.min_events: 8
+  flush.timeout: 0.1s
+output.console:
+  code.json:
+    pretty: false
+`
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test", "-e")
+	mockbeat.WriteConfigFile(cfg)
+	mockbeat.Start()
+	line := mockbeat.WaitStdErrContains("ecs.version", 60*time.Second)
+
+	var m map[string]any
+	require.NoError(t, json.Unmarshal([]byte(line), &m), "Unmarshaling log line as json")
+
+	_, ok := m["log.level"]
+	assert.True(t, ok)
+
+	_, ok = m["@timestamp"]
+	assert.True(t, ok)
+
+	_, ok = m["message"]
+	assert.True(t, ok)
+}
+
+func TestLoggingFileDefault(t *testing.T) {
+	cfg := `
+mockbeat:
+name:
+queue.mem:
+  events: 4096
+  flush.min_events: 8
+  flush.timeout: 0.1s
+output.console:
+  code.json:
+    pretty: false
+`
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test")
+	mockbeat.WriteConfigFile(cfg)
+	mockbeat.Start()
+	mockbeat.WaitStdOutContains("Mockbeat is alive!", 60*time.Second)
+}
diff --git a/libbeat/tests/integration/meta_test.go b/libbeat/tests/integration/meta_test.go
new file mode 100644
index 00000000000..e4fc44afb4c
--- /dev/null
+++ b/libbeat/tests/integration/meta_test.go
@@ -0,0 +1,69 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build integration
+
+package integration
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestMetaFileExists(t *testing.T) {
+	cfg := `
+mockbeat:
+name:
+queue.mem:
+  events: 4096
+  flush.min_events: 8
+  flush.timeout: 0.1s
+output.console:
+  code.json:
+    pretty: false
+`
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test")
+	mockbeat.WriteConfigFile(cfg)
+	mockbeat.Start()
+	mockbeat.WaitForLogs("mockbeat start running.", 60*time.Second)
+	_, err := os.Stat(mockbeat.TempDir() + "/data/meta.json")
+	require.NoError(t, err)
+}
+
+func TestMetaFilePermissions(t *testing.T) {
+	cfg := `
+mockbeat:
+name:
+queue.mem:
+  events: 4096
+  flush.min_events: 8
+  flush.timeout: 0.1s
+output.console:
+  code.json:
+    pretty: false
+`
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test")
+	mockbeat.WriteConfigFile(cfg)
+	mockbeat.Start()
+	mockbeat.WaitForLogs("mockbeat start running.", 60*time.Second)
+	stat, err := os.Stat(mockbeat.TempDir() + "/data/meta.json")
+	require.NoError(t, err)
+	// expected value first, actual value second
+	require.Equal(t, "-rw-------", stat.Mode().String())
+}
diff --git a/libbeat/tests/integration/mockserver.go b/libbeat/tests/integration/mockserver.go
new file mode 100644
index 00000000000..0a396cb7839
--- /dev/null
+++ b/libbeat/tests/integration/mockserver.go
@@ -0,0 +1,137 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package integration
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"google.golang.org/protobuf/types/known/structpb"
+
+	"github.com/elastic/beats/v7/libbeat/version"
+	"github.com/elastic/elastic-agent-client/v7/pkg/client"
+	"github.com/elastic/elastic-agent-client/v7/pkg/client/mock"
+	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
+)
+
+// unitKey is used to identify a unique unit in a map.
+// A unit's `ID` alone is not unique without its type; only `Type` + `ID` is.
+type unitKey struct {
+	Type client.UnitType
+	ID   string
+}
+
+// NewMockServer creates a gRPC server to mock the Elastic-Agent.
+// On the first check-in call it will send the first element of `units`
+// as the expected state; on successive calls, if the Beat has reached
+// that state, it will move on to sending the next state.
+// It will also validate the features.
+//
+// If `observedCallback` is not nil, it will be called on every
+// check-in, receiving the `proto.CheckinObserved` sent by the
+// Beat and the index from `units` that was last sent to the Beat.
+//
+// If `delay` is not zero, when the Beat state matches the last
+// sent units, the server will wait for `delay` before sending the
+// next state. This will block the check-in call from the Beat.
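+//
+// A minimal sketch of how a test might drive it (illustrative only;
+// the units, featuresIdxs and features slices are assumptions to be
+// built by the caller, and the exact StubServerV2 surface is defined
+// by the elastic-agent-client mock package):
+//
+//	server := NewMockServer(units, featuresIdxs, features, nil, 0)
+//	require.NoError(t, server.Start())
+//	defer server.Stop()
+//	// then point the Beat under test at the server's port through
+//	// its management configuration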
+func NewMockServer(
+	units [][]*proto.UnitExpected,
+	featuresIdxs []uint64,
+	features []*proto.Features,
+	observedCallback func(*proto.CheckinObserved, int),
+	delay time.Duration,
+) *mock.StubServerV2 {
+	i := 0
+	agentInfo := &proto.CheckinAgentInfo{
+		Id:       "elastic-agent-id",
+		Version:  version.GetDefaultVersion(),
+		Snapshot: true,
+	}
+	return &mock.StubServerV2{
+		CheckinV2Impl: func(observed *proto.CheckinObserved) *proto.CheckinExpected {
+			if observedCallback != nil {
+				observedCallback(observed, i)
+			}
+			matches := doesStateMatch(observed, units[i], featuresIdxs[i])
+			if !matches {
+				// send the same set of units and features again
+				return &proto.CheckinExpected{
+					AgentInfo:   agentInfo,
+					Units:       units[i],
+					Features:    features[i],
+					FeaturesIdx: featuresIdxs[i],
+				}
+			}
+			// the Beat has reached the expected state; optionally
+			// wait before sending the next one
+			if delay > 0 {
+				<-time.After(delay)
+			}
+			// send the next set of units and features
+			i++
+			if i >= len(units) {
+				// stay on the last index
+				i = len(units) - 1
+			}
+			return &proto.CheckinExpected{
+				AgentInfo:   agentInfo,
+				Units:       units[i],
+				Features:    features[i],
+				FeaturesIdx: featuresIdxs[i],
+			}
+		},
+		ActionImpl: func(response *proto.ActionResponse) error {
+			// actions are not tested here
+			return nil
+		},
+		ActionsChan: make(chan *mock.PerformAction, 100),
+	}
+}
+
+func doesStateMatch(
+	observed *proto.CheckinObserved,
+	expectedUnits []*proto.UnitExpected,
+	expectedFeaturesIdx uint64,
+) bool {
+	if len(observed.Units) != len(expectedUnits) {
+		return false
+	}
+	expectedMap := make(map[unitKey]*proto.UnitExpected)
+	for _, exp := range expectedUnits {
+		expectedMap[unitKey{client.UnitType(exp.Type), exp.Id}] = exp
+	}
+	for _, unit := range observed.Units {
+		exp, ok := expectedMap[unitKey{client.UnitType(unit.Type), unit.Id}]
+		if !ok {
+			return false
+		}
+		if unit.State != exp.State || unit.ConfigStateIdx != exp.ConfigStateIdx {
+			return false
+		}
+	}
+
+	return observed.FeaturesIdx == expectedFeaturesIdx
+}
+
+// RequireNewStruct converts a map into a structpb.Struct, failing
+// the test on error.
+func RequireNewStruct(t *testing.T, v map[string]interface{}) *structpb.Struct {
+	str, err := structpb.NewStruct(v)
+	require.NoError(t, err, "could not convert map[string]interface{} into structpb")
+	return str
+}
diff --git a/libbeat/tests/integration/template_test.go b/libbeat/tests/integration/template_test.go
new file mode 100644
index 00000000000..2066aa0efcc
--- /dev/null
+++ b/libbeat/tests/integration/template_test.go
@@ -0,0 +1,640 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build integration
+
+package integration
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+// Test that the Beat stops if the Elasticsearch index is modified while
+// neither setup.template.name nor setup.template.pattern is set
+func TestIndexModified(t *testing.T) {
+	mockbeatConfigWithIndex := `
+mockbeat:
+output:
+  elasticsearch:
+    index: test
+`
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test")
+	mockbeat.WriteConfigFile(mockbeatConfigWithIndex)
+	mockbeat.Start()
+	procState, err := mockbeat.Process.Wait()
+	require.NoError(t, err, "error waiting for mockbeat to exit")
+	require.Equal(t, 1, procState.ExitCode(), "incorrect exit code")
+	mockbeat.WaitStdErrContains("setup.template.name and setup.template.pattern have to be set if index name is modified", 60*time.Second)
+}
+
+// Test that the Beat starts running if the Elasticsearch output is set
+// with the default index
+func TestIndexNotModified(t *testing.T) {
+	EnsureESIsRunning(t)
+	mockbeatConfigWithES := `
+mockbeat:
+output:
+  elasticsearch:
+    hosts: %s
+`
+	esUrl := GetESURL(t, "http")
+	cfg := fmt.Sprintf(mockbeatConfigWithES, esUrl.String())
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test")
+	mockbeat.WriteConfigFile(cfg)
+	mockbeat.Start()
+	mockbeat.WaitForLogs("mockbeat start running.", 60*time.Second)
+}
+
+// Test that the Beat stops if the index is modified and the pattern is not set
+func TestIndexModifiedNoPattern(t *testing.T) {
+	cfg := `
+mockbeat:
+output:
+  elasticsearch:
+    index: test
+setup.template:
+  name: test
+`
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test")
+	mockbeat.WriteConfigFile(cfg)
+	mockbeat.Start()
+	procState, err := mockbeat.Process.Wait()
+	require.NoError(t, err, "error waiting for mockbeat to exit")
+	require.Equal(t, 1, procState.ExitCode(), "incorrect exit code")
+	mockbeat.WaitStdErrContains("setup.template.name and setup.template.pattern have to be set if index name is modified", 60*time.Second)
+}
+
+// Test that the Beat stops if the index is modified and the name is not set
+func TestIndexModifiedNoName(t *testing.T) {
+	cfg := `
+mockbeat:
+output:
+  elasticsearch:
+    index: test
+setup.template:
+  pattern: test
+`
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test")
+	mockbeat.WriteConfigFile(cfg)
+	mockbeat.Start()
+	procState, err := mockbeat.Process.Wait()
+	require.NoError(t, err, "error waiting for mockbeat to exit")
+	require.Equal(t, 1, procState.ExitCode(), "incorrect exit code")
+	mockbeat.WaitStdErrContains("setup.template.name and setup.template.pattern have to be set if index name is modified", 60*time.Second)
+}
+
+// Test that the Beat starts running if the index is modified and both
+// template name and pattern are set
+func TestIndexWithPatternName(t *testing.T) {
+	EnsureESIsRunning(t)
+	mockbeatConfigWithES := `
+mockbeat:
+output:
+  elasticsearch:
+    hosts: %s
+setup.template:
+  name: test
+  pattern: test-*
+`
+
+	esUrl := GetESURL(t, "http")
+	cfg := fmt.Sprintf(mockbeatConfigWithES, esUrl.String())
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test")
+	mockbeat.WriteConfigFile(cfg)
+	mockbeat.Start()
+	mockbeat.WaitForLogs("mockbeat start running.", 60*time.Second)
+}
+
+// Test loading of a JSON-based template
+func TestJsonTemplate(t *testing.T) {
+	EnsureESIsRunning(t)
+	_, err := os.Stat("../files/template.json")
+	require.NoError(t, err)
+
+	templateName := "bla"
+	mockbeatConfigWithES := `
+mockbeat:
+output:
+  elasticsearch:
+    hosts: %s
+    username: %s
+    password: %s
+    allow_older_versions: true
+setup.template:
+  name: test
+  pattern: test-*
+  overwrite: true
+  json:
+    enabled: true
+    path: %s
+    name: %s
+logging:
+  level: debug
+`
+
+	// prepare the config
+	pwd, err := os.Getwd()
+	require.NoError(t, err, "error getting the current working directory")
+	path := filepath.Join(pwd, "../files/template.json")
+	esUrl := GetESURL(t, "http")
+	user := esUrl.User.Username()
+	pass, _ := esUrl.User.Password()
+	cfg := fmt.Sprintf(mockbeatConfigWithES, esUrl.String(), user, pass, path, templateName)
+
+	// start mockbeat and wait for the relevant log lines
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test")
+	mockbeat.WriteConfigFile(cfg)
+	mockbeat.Start()
+	mockbeat.WaitForLogs("mockbeat start running.", 60*time.Second)
+	msg := "Loading json template from file"
+	mockbeat.WaitForLogs(msg, 60*time.Second)
+	msg = "Template with name \\\"bla\\\" loaded."
+	mockbeat.WaitForLogs(msg, 60*time.Second)
+
+	// check effective changes in ES
+	indexURL, err := FormatIndexTemplateURL(t, esUrl, templateName)
+	require.NoError(t, err)
+	status, body, err := HttpDo(t, http.MethodGet, indexURL)
+	require.NoError(t, err)
+	require.Equal(t, http.StatusOK, status, "incorrect status code")
+
+	var m IndexTemplateResult
+	err = json.Unmarshal(body, &m)
+	require.NoError(t, err)
+	require.Equal(t, 1, len(m.IndexTemplates))
+}
+
+// Test the run cmd with default template settings
+func TestTemplateDefault(t *testing.T) {
+	EnsureESIsRunning(t)
+
+	mockbeatConfigWithES := `
+mockbeat:
+output:
+  elasticsearch:
+    hosts: %s
+    username: %s
+    password: %s
+    allow_older_versions: true
+logging:
+  level: debug
+`
+	datastream := "mockbeat-9.9.9"
+
+	// prepare the config
+	esUrl := GetESURL(t, "http")
+	user := esUrl.User.Username()
+	pass, _ := esUrl.User.Password()
+	cfg := fmt.Sprintf(mockbeatConfigWithES, esUrl.String(), user, pass)
+
+	// make sure the data stream and index template aren't present
+	dsURL, err := FormatDatastreamURL(t, esUrl, datastream)
+	require.NoError(t, err)
+	_, _, err = HttpDo(t, http.MethodDelete, dsURL)
+	require.NoError(t, err)
+
+	indexURL, err := FormatIndexTemplateURL(t, esUrl, datastream)
+	require.NoError(t, err)
+	_, _, err = HttpDo(t, http.MethodDelete, indexURL)
+	require.NoError(t, err)
+
+	// start mockbeat and wait for the relevant log lines
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test")
+	mockbeat.WriteConfigFile(cfg)
+	mockbeat.Start()
+	mockbeat.WaitForLogs("mockbeat start running.", 60*time.Second)
+	mockbeat.WaitForLogs("Template with name \\\"mockbeat-9.9.9\\\" loaded.", 20*time.Second)
+	mockbeat.WaitForLogs("PublishEvents: 1 events have been published", 20*time.Second)
+
+	status, body, err := HttpDo(t, http.MethodGet, indexURL)
+	require.NoError(t, err)
+	require.Equal(t, http.StatusOK, status, "incorrect status code")
+
+	var m IndexTemplateResult
+	err = json.Unmarshal(body, &m)
+	require.NoError(t, err)
+
+	require.Equal(t, 1, len(m.IndexTemplates))
+	require.Equal(t, datastream, m.IndexTemplates[0].Name)
+
+	refreshURL := FormatRefreshURL(t, esUrl)
+	status, body, err = HttpDo(t, http.MethodPost, refreshURL)
+	require.NoError(t, err)
+	require.Equal(t, http.StatusOK, status, "incorrect http status")
+
+	searchURL, err := FormatDataStreamSearchURL(t, esUrl, datastream)
+	require.NoError(t, err)
+	status, body, err = HttpDo(t, http.MethodGet, searchURL)
+	require.NoError(t, err)
+	var results SearchResult
+	err = json.Unmarshal(body, &results)
+	require.NoError(t, err)
+
+	require.True(t, results.Hits.Total.Value > 0)
+}
+
+// Test that the run cmd does not load the template when it is disabled in the config
+func TestTemplateDisabled(t *testing.T) {
+	EnsureESIsRunning(t)
+
+	mockbeatConfigWithES := `
+mockbeat:
+output:
+  elasticsearch:
+    hosts: %s
+    username: %s
+    password: %s
+    allow_older_versions: true
+setup.template:
+  enabled: false
+logging:
+  level: debug
+`
+	datastream := "mockbeat-9.9.9"
+
+	// prepare the config
+	esUrl := GetESURL(t, "http")
+	user := esUrl.User.Username()
+	pass, _ := esUrl.User.Password()
+	cfg := fmt.Sprintf(mockbeatConfigWithES, esUrl.String(), user, pass)
+
+	dsURL, err := FormatDatastreamURL(t, esUrl, datastream)
+	require.NoError(t, err)
+	_, _, err = HttpDo(t, http.MethodDelete, dsURL)
+	require.NoError(t, err)
+
+	indexURL, err := FormatIndexTemplateURL(t, esUrl, datastream)
+	require.NoError(t, err)
+	_, _, err = HttpDo(t, http.MethodDelete, indexURL)
+	require.NoError(t, err)
+
+	// start mockbeat and wait for the relevant log lines
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test")
+	mockbeat.WriteConfigFile(cfg)
+	mockbeat.Start()
+	mockbeat.WaitForLogs("mockbeat start running.", 60*time.Second)
+	mockbeat.WaitForLogs("PublishEvents: 1 events have been published", 20*time.Second)
+
+	u := fmt.Sprintf("%s/_index_template/%s", esUrl.String(), datastream)
+	r, err := http.Get(u)
+	require.NoError(t, err, "error querying the index template")
+	require.Equal(t, http.StatusNotFound, r.StatusCode, "incorrect status code")
+}
+
+func TestSetupCmd(t *testing.T) {
+	EnsureESIsRunning(t)
+
+	cfg := `
+mockbeat:
+output:
+  elasticsearch:
+    hosts: %s
+    username: %s
+    password: %s
+    allow_older_versions: true
+logging:
+  level: debug
+`
+	dataStream := "mockbeat-9.9.9"
+	policy := "mockbeat"
+	esURL := GetESURL(t, "http")
+	user := esURL.User.Username()
+	pass, _ := esURL.User.Password()
+	dataStreamURL, err := FormatDatastreamURL(t, esURL, dataStream)
+	require.NoError(t, err)
+	templateURL, err := FormatIndexTemplateURL(t, esURL, dataStream)
+	require.NoError(t, err)
+	policyURL, err := FormatPolicyURL(t, esURL, policy)
+	require.NoError(t, err)
+	t.Cleanup(func() {
+		_, _, err = HttpDo(t, http.MethodDelete, dataStreamURL)
+		require.NoError(t, err)
+		_, _, err = HttpDo(t, http.MethodDelete, templateURL)
+		require.NoError(t, err)
+		_, _, err = HttpDo(t, http.MethodDelete, policyURL)
+		require.NoError(t, err)
+	})
+	// Make sure datastream, template and policy don't exist
+	_, _, err = HttpDo(t, http.MethodDelete, dataStreamURL)
+	require.NoError(t, err)
+	_, _, err = HttpDo(t, http.MethodDelete, templateURL)
+	require.NoError(t, err)
+	_, _, err = HttpDo(t, http.MethodDelete, policyURL)
+	require.NoError(t, err)
+
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test")
+	mockbeat.WriteConfigFile(fmt.Sprintf(cfg, esURL.String(), user, pass))
+	mockbeat.Start("setup", "--index-management")
+	procState, err := mockbeat.Process.Wait()
+	require.NoError(t, err)
+	require.Equal(t, 0, procState.ExitCode(), "incorrect exit code")
+
+	// check template loaded
+	status, body, err := HttpDo(t, http.MethodGet, templateURL)
+	require.NoError(t, err)
+	require.Equal(t, http.StatusOK, status, "incorrect status code")
+
+	var r IndexTemplateResult
+	err = json.Unmarshal(body, &r)
+	require.NoError(t, err)
+	var found bool
+	// use tpl, not t, to avoid shadowing the *testing.T
+	for _, tpl := range r.IndexTemplates {
+		if tpl.Name == dataStream {
+			found = true
+		}
+	}
+	require.Truef(t, found, "data stream should be in: %v", r.IndexTemplates)
+
+	status, body, err = HttpDo(t, http.MethodGet, policyURL)
+	require.NoError(t, err)
+	require.Equal(t, http.StatusOK, status, "incorrect status code")
+
+	require.Truef(t, strings.Contains(string(body), "max_primary_shard_size\":\"50gb"), "primary shard not found in %s", string(body))
%s", string(body)) + + require.Truef(t, strings.Contains(string(body), "max_age\":\"30d"), "max_age not found in %s", string(body)) +} + +func TestSetupCmdTemplateDisabled(t *testing.T) { + EnsureESIsRunning(t) + + cfg := ` +mockbeat: +output: + elasticsearch: + hosts: %s + username: %s + password: %s + allow_older_versions: true +logging: + level: debug +setup: + template: + enabled: false +` + dataStream := "mockbeat-9.9.9" + policy := "mockbeat" + esURL := GetESURL(t, "http") + user := esURL.User.Username() + pass, _ := esURL.User.Password() + dataStreamURL, err := FormatDatastreamURL(t, esURL, dataStream) + require.NoError(t, err) + templateURL, err := FormatIndexTemplateURL(t, esURL, dataStream) + require.NoError(t, err) + policyURL, err := FormatPolicyURL(t, esURL, policy) + require.NoError(t, err) + t.Cleanup(func() { + _, _, err = HttpDo(t, http.MethodDelete, dataStreamURL) + require.NoError(t, err) + _, _, err = HttpDo(t, http.MethodDelete, templateURL) + require.NoError(t, err) + _, _, err = HttpDo(t, http.MethodDelete, policyURL) + require.NoError(t, err) + }) + // Make sure datastream, template and policy don't exist + _, _, err = HttpDo(t, http.MethodDelete, dataStreamURL) + require.NoError(t, err) + _, _, err = HttpDo(t, http.MethodDelete, templateURL) + require.NoError(t, err) + _, _, err = HttpDo(t, http.MethodDelete, policyURL) + require.NoError(t, err) + + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(fmt.Sprintf(cfg, esURL.String(), user, pass)) + mockbeat.Start("setup", "--index-management") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + + // check template didn't load + status, body, err := HttpDo(t, http.MethodGet, templateURL) + require.NoError(t, err) + require.Equal(t, http.StatusNotFound, status, "incorrect status code") + + status, body, err = HttpDo(t, http.MethodGet, policyURL) + require.NoError(t, err) + require.Equal(t, http.StatusOK, status, "incorrect status code") + + require.Truef(t, strings.Contains(string(body), "max_primary_shard_size\":\"50gb"), "primary shard not found in %s", string(body)) + + require.Truef(t, strings.Contains(string(body), "max_age\":\"30d"), "max_age not found in %s", string(body)) +} + +func TestSetupCmdTemplateWithOpts(t *testing.T) { + EnsureESIsRunning(t) + + cfg := ` +mockbeat: +output: + elasticsearch: + hosts: %s + username: %s + password: %s + allow_older_versions: true +logging: + level: debug +` + dataStream := "mockbeat-9.9.9" + policy := "mockbeat" + esURL := GetESURL(t, "http") + user := esURL.User.Username() + pass, _ := esURL.User.Password() + dataStreamURL, err := FormatDatastreamURL(t, esURL, dataStream) + require.NoError(t, err) + templateURL, err := FormatIndexTemplateURL(t, esURL, dataStream) + require.NoError(t, err) + policyURL, err := FormatPolicyURL(t, esURL, policy) + require.NoError(t, err) + t.Cleanup(func() { + _, _, err = HttpDo(t, http.MethodDelete, dataStreamURL) + require.NoError(t, err) + _, _, err = HttpDo(t, http.MethodDelete, templateURL) + require.NoError(t, err) + _, _, err = HttpDo(t, http.MethodDelete, policyURL) + require.NoError(t, err) + }) + // Make sure datastream, template and policy don't exist + _, _, err = HttpDo(t, http.MethodDelete, dataStreamURL) + require.NoError(t, err) + _, _, err = HttpDo(t, http.MethodDelete, templateURL) + require.NoError(t, err) + _, _, err = HttpDo(t, http.MethodDelete, policyURL) + require.NoError(t, err) + + mockbeat := 
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test")
+	mockbeat.WriteConfigFile(fmt.Sprintf(cfg, esURL.String(), user, pass))
+	mockbeat.Start("setup", "--index-management", "-E", "setup.ilm.enabled=false", "-E", "setup.template.settings.index.number_of_shards=2")
+	procState, err := mockbeat.Process.Wait()
+	require.NoError(t, err)
+	require.Equal(t, 0, procState.ExitCode(), "incorrect exit code")
+
+	// check template loaded
+	status, body, err := HttpDo(t, http.MethodGet, templateURL)
+	require.NoError(t, err)
+	require.Equalf(t, http.StatusOK, status, "incorrect status code for: %s", templateURL.String())
+	require.Truef(t, strings.Contains(string(body), "number_of_shards\":\"2"), "number of shards not found in %s", string(body))
+}
+
+func TestTemplateCreatedOnIlmPolicyCreated(t *testing.T) {
+	EnsureESIsRunning(t)
+
+	cfg := `
+mockbeat:
+output:
+  elasticsearch:
+    hosts: %s
+    username: %s
+    password: %s
+    allow_older_versions: true
+logging:
+  level: debug
+`
+	dataStream := "mockbeat-9.9.9"
+	policy := "mockbeat"
+	esURL := GetESURL(t, "http")
+	user := esURL.User.Username()
+	pass, _ := esURL.User.Password()
+	dataStreamURL, err := FormatDatastreamURL(t, esURL, dataStream)
+	require.NoError(t, err)
+	templateURL, err := FormatIndexTemplateURL(t, esURL, dataStream)
+	require.NoError(t, err)
+	policyURL, err := FormatPolicyURL(t, esURL, policy)
+	require.NoError(t, err)
+	t.Cleanup(func() {
+		_, _, err = HttpDo(t, http.MethodDelete, dataStreamURL)
+		require.NoError(t, err)
+		_, _, err = HttpDo(t, http.MethodDelete, templateURL)
+		require.NoError(t, err)
+		_, _, err = HttpDo(t, http.MethodDelete, policyURL)
+		require.NoError(t, err)
+	})
+	// Make sure datastream, template and policy don't exist
+	_, _, err = HttpDo(t, http.MethodDelete, dataStreamURL)
+	require.NoError(t, err)
+	_, _, err = HttpDo(t, http.MethodDelete, templateURL)
+	require.NoError(t, err)
+	_, _, err = HttpDo(t, http.MethodDelete, policyURL)
+	require.NoError(t, err)
+
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test")
+	mockbeat.WriteConfigFile(fmt.Sprintf(cfg, esURL.String(), user, pass))
+	mockbeat.Start("setup", "--index-management", "-E", "setup.ilm.enabled=false")
+	procState, err := mockbeat.Process.Wait()
+	require.NoError(t, err)
+	require.Equal(t, 0, procState.ExitCode(), "incorrect exit code")
+
+	// check template loaded
+	status, body, err := HttpDo(t, http.MethodGet, templateURL)
+	require.NoError(t, err)
+	require.Equal(t, http.StatusOK, status, "incorrect status code")
+
+	var r IndexTemplateResult
+	err = json.Unmarshal(body, &r)
+	require.NoError(t, err)
+	var found bool
+	// use tpl, not t, to avoid shadowing the *testing.T
+	for _, tpl := range r.IndexTemplates {
+		if tpl.Name == dataStream {
+			found = true
+		}
+	}
+	require.Truef(t, found, "data stream should be in: %v", r.IndexTemplates)
+
+	// check policy not created
+	status, body, err = HttpDo(t, http.MethodGet, policyURL)
+	require.NoError(t, err)
+	require.Equalf(t, http.StatusNotFound, status, "incorrect status code for: %s", policyURL.String())
+
+	mockbeat.Start("setup", "--index-management", "-E", "setup.template.overwrite=false", "-E", "setup.template.settings.index.number_of_shards=2")
+	procState, err = mockbeat.Process.Wait()
+	require.NoError(t, err)
+	require.Equal(t, 0, procState.ExitCode(), "incorrect exit code")
+
+	// check policy created
+	status, body, err = HttpDo(t, http.MethodGet, policyURL)
+	require.NoError(t, err)
+	require.Equal(t, http.StatusOK, status, "incorrect status code")
+
+	require.Truef(t, strings.Contains(string(body), "max_primary_shard_size\":\"50gb"), "primary shard not found in %s", string(body))
"primary shard not found in %s", string(body)) + + require.Truef(t, strings.Contains(string(body), "max_age\":\"30d"), "max_age not found in %s", string(body)) +} + +func TestExportTemplate(t *testing.T) { + cfg := ` +mockbeat: +output: + console: + enabled: true +logging: + level: debug +` + + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(cfg) + mockbeat.Start("export", "template") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + mockbeat.WaitStdOutContains("mockbeat-9.9.9", 5*time.Second) +} + +func TestExportTemplateDisabled(t *testing.T) { + cfg := ` +mockbeat: +output: + console: + enabled: true +logging: + level: debug +` + + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(cfg) + mockbeat.Start("export", "template", "-E", "setup.template.enabled=false") + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + mockbeat.WaitStdOutContains("mockbeat-9.9.9", 5*time.Second) +} + +func TestExportAbsolutePath(t *testing.T) { + cfg := ` +mockbeat: +output: + console: + enabled: true +logging: + level: debug +` + + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + output := filepath.Join(mockbeat.TempDir(), "template", "mockbeat-9.9.9.json") + t.Cleanup(func() { + os.Remove(output) + }) + mockbeat.WriteConfigFile(cfg) + mockbeat.Start("export", "template", "--dir", mockbeat.TempDir()) + procState, err := mockbeat.Process.Wait() + require.NoError(t, err) + require.Equal(t, 0, procState.ExitCode(), "incorrect exit code") + mockbeat.WaitStdOutContains("Writing to", 5*time.Second) + mockbeat.WaitFileContains(output, "mockbeat-9.9.9", 5*time.Second) +} diff --git a/libbeat/tests/integration/testdata/testbeat-dashboards.zip b/libbeat/tests/integration/testdata/testbeat-dashboards.zip new file mode 100644 index 00000000000..78c185faaaf Binary files /dev/null and b/libbeat/tests/integration/testdata/testbeat-dashboards.zip differ diff --git a/libbeat/tests/integration/testdata/testbeat-no-dashboards.zip b/libbeat/tests/integration/testdata/testbeat-no-dashboards.zip new file mode 100644 index 00000000000..90a9a1a7994 Binary files /dev/null and b/libbeat/tests/integration/testdata/testbeat-no-dashboards.zip differ diff --git a/libbeat/tests/system/requirements.txt b/libbeat/tests/system/requirements.txt index b65d5089c21..6b8cc6d52a0 100644 --- a/libbeat/tests/system/requirements.txt +++ b/libbeat/tests/system/requirements.txt @@ -36,7 +36,7 @@ pyrsistent==0.16.0 pytest==7.3.2 pytest-rerunfailures==9.1.1 pytest-timeout==1.4.2 -PyYAML==5.4.1 +PyYAML==5.3.1 redis==4.4.4 requests==2.31.0 semver==2.8.1 diff --git a/libbeat/tests/system/requirements_aix.txt b/libbeat/tests/system/requirements_aix.txt index 69789e108c2..adc1a1a2171 100644 --- a/libbeat/tests/system/requirements_aix.txt +++ b/libbeat/tests/system/requirements_aix.txt @@ -35,7 +35,7 @@ pyrsistent==0.16.0 pytest==7.3.2 pytest-rerunfailures==9.1.1 pytest-timeout==1.4.2 -PyYAML==5.4.1 +PyYAML==5.3.1 redis==4.4.4 requests==2.31.0 semver==2.8.1 diff --git a/libbeat/tests/system/test_base.py b/libbeat/tests/system/test_base.py deleted file mode 100644 index fbb8b324f16..00000000000 --- a/libbeat/tests/system/test_base.py +++ /dev/null @@ -1,173 +0,0 @@ -from base import BaseTest -from beat import common_tests - -import json -import os -import shutil -import signal -import subprocess -import sys 
-import unittest - - -class Test(BaseTest, common_tests.TestExportsMixin): - - def test_base(self): - """ - Basic test with exiting Mockbeat normally - """ - self.render_config_template( - ) - - proc = self.start_beat() - self.wait_until(lambda: self.log_contains("mockbeat start running.")) - proc.check_kill_and_wait() - assert self.log_contains("mockbeat stopped.") - - @unittest.skipIf(sys.platform.startswith("win"), "SIGHUP is not available on Windows") - def test_sighup(self): - """ - Basic test with exiting Mockbeat because of SIGHUP - """ - self.render_config_template( - ) - - proc = self.start_beat() - self.wait_until(lambda: self.log_contains("mockbeat start running.")) - proc.proc.send_signal(signal.SIGHUP) - proc.check_wait() - assert self.log_contains("mockbeat stopped.") - - def test_no_config(self): - """ - Tests starting without a config - """ - exit_code = self.run_beat() - - assert exit_code == 1 - assert self.log_contains("error loading config file") is True - - def test_invalid_config(self): - """ - Checks stop on invalid config - """ - shutil.copy(self.beat_path + "/tests/files/invalid.yml", - os.path.join(self.working_dir, "invalid.yml")) - - exit_code = self.run_beat(config="invalid.yml") - - assert exit_code == 1 - assert self.log_contains("error loading config file") is True - - def test_invalid_config_cli_param(self): - """ - Checks CLI overwrite actually overwrites some config variable by - writing an invalid value. - """ - - self.render_config_template( - console={"pretty": "false"} - ) - - # first run with default config, validating config being - # actually correct. - proc = self.start_beat() - self.wait_until(lambda: self.log_contains("mockbeat start running.")) - proc.check_kill_and_wait() - - # start beat with invalid config setting on command line - exit_code = self.run_beat( - extra_args=["-d", "config", "-E", "output.console=invalid"]) - - assert exit_code == 1 - assert self.log_contains("error unpacking config data") is True - - # NOTE(ph): I've removed the code to crash with theses settings, but the test is still usefull if - # more settings are added. 
- # def test_invalid_config_with_removed_settings(self): - # """ - # Checks if libbeat fails to load if removed settings have been used: - # """ - # self.render_config_template(console={"pretty": "false"}) - - # exit_code = self.run_beat(extra_args=[ - # "-E", "queue_size=2048", - # "-E", "bulk_queue_size=1", - # ]) - - # assert exit_code == 1 - # assert self.log_contains("setting 'queue_size' has been removed") - # assert self.log_contains("setting 'bulk_queue_size' has been removed") - - def test_console_output_timed_flush(self): - """ - outputs/console - timed flush - """ - self.render_config_template( - console={"pretty": "false"} - ) - - proc = self.start_beat(logging_args=["-e"]) - self.wait_until(lambda: self.log_contains("Mockbeat is alive"), - max_timeout=2) - proc.check_kill_and_wait() - - def test_console_output_size_flush(self): - """ - outputs/console - size based flush - """ - self.render_config_template( - console={ - "pretty": "false", - "bulk_max_size": 1, - } - ) - - proc = self.start_beat(logging_args=["-e"]) - self.wait_until(lambda: self.log_contains("Mockbeat is alive"), - max_timeout=2) - proc.check_kill_and_wait() - - def test_logging_metrics(self): - self.render_config_template( - metrics_period="0.1s" - ) - proc = self.start_beat(logging_args=["-e"]) - self.wait_until( - lambda: self.log_contains("Non-zero metrics in the last 100ms"), - max_timeout=2) - proc.check_kill_and_wait() - self.wait_until( - lambda: self.log_contains("Total metrics"), - max_timeout=2) - - def test_persistent_uuid(self): - self.render_config_template() - - # run starts and kills the beat, reading the meta file while - # the beat is alive - def run(): - proc = self.start_beat(extra_args=["-path.home", self.working_dir]) - self.wait_until(lambda: self.log_contains("Mockbeat is alive"), - max_timeout=60) - - # open meta file before killing the beat, checking the file being - # available right after startup - metaFile = os.path.join(self.working_dir, "data", "meta.json") - with open(metaFile) as f: - meta = json.loads(f.read()) - - proc.check_kill_and_wait() - return meta - - meta0 = run() - assert self.log_contains("Beat ID: {}".format(meta0["uuid"])) - - # remove log, restart beat and check meta file did not change - # and same UUID is used in log output. 
- os.remove(os.path.join(self.working_dir, "mockbeat-" + self.today + ".ndjson")) - meta1 = run() - assert self.log_contains("Beat ID: {}".format(meta1["uuid"])) - - # check meta file did not change between restarts - assert meta0 == meta1 diff --git a/libbeat/tests/system/test_ca_pinning.py b/libbeat/tests/system/test_ca_pinning.py deleted file mode 100644 index 5b4e47b117f..00000000000 --- a/libbeat/tests/system/test_ca_pinning.py +++ /dev/null @@ -1,84 +0,0 @@ -import logging -import os -import pytest -import unittest -from base import BaseTest -from elasticsearch import RequestError -from idxmgmt import IdxMgmt - -INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False) - - -class TestCAPinning(BaseTest): - """ - Test beat CA pinning for elasticsearch - """ - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_sending_events_with_a_good_sha256(self): - """ - Test Sending events while using ca pinning with a good sha256 - """ - - ca = os.path.join(self.beat_path, - "..", - "testing", - "environments", - "docker", - "elasticsearch", - "pki", - "ca", - "ca.crt") - - self.render_config_template( - elasticsearch={ - "host": self.get_elasticsearch_url_ssl(), - "user": "admin", - "pass": "testing", - # Use certificate verification only to avoid validating localhost as the hostname. - "ssl_verification_mode": "certificate", - "ssl_certificate_authorities": [ca], - "ssl_ca_sha256": "FDFOtqdUyXZw74YgvAJUC+I67ED1WfcI1qK44Qy2WQM=", - }, - ) - - proc = self.start_beat() - self.wait_until(lambda: self.log_contains("mockbeat start running.")) - self.wait_until(lambda: self.log_contains("PublishEvents: 1 events have been published")) - proc.check_kill_and_wait() - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_sending_events_with_a_bad_sha256(self): - """ - Test Sending events while using ca pinning with a bad sha256 - """ - - ca = os.path.join(self.beat_path, - "..", - "testing", - "environments", - "docker", - "elasticsearch", - "pki", - "ca", - "ca.crt") - - self.render_config_template( - elasticsearch={ - "host": self.get_elasticsearch_url_ssl(), - "user": "beats", - "pass": "testing", - # Use certificate verification only to avoid validating localhost as the hostname. 
- "ssl_verification_mode": "certificate", - "ssl_certificate_authorities": [ca], - "ssl_ca_sha256": "not-good-sha", - }, - ) - - proc = self.start_beat() - self.wait_until(lambda: self.log_contains("mockbeat start running.")) - self.wait_until(lambda: self.log_contains( - "provided CA certificate pins doesn't match any of the certificate authorities used to validate the certificate")) - proc.check_kill_and_wait() diff --git a/libbeat/tests/system/test_cmd_completion.py b/libbeat/tests/system/test_cmd_completion.py deleted file mode 100644 index a06259a5046..00000000000 --- a/libbeat/tests/system/test_cmd_completion.py +++ /dev/null @@ -1,25 +0,0 @@ -from base import BaseTest - - -class TestCommandCompletion(BaseTest): - """ - Test beat completion subcommand - """ - - def setUp(self): - super(BaseTest, self).setUp() - - def test_bash_completion(self): - exit_code = self.run_beat(extra_args=["completion", "bash"]) - assert exit_code == 0 - assert self.log_contains("bash completion for mockbeat") - - def test_zsh_completion(self): - exit_code = self.run_beat(extra_args=["completion", "zsh"]) - assert exit_code == 0 - assert self.log_contains("#compdef _mockbeat mockbeat") - - def test_unknown_completion(self): - exit_code = self.run_beat(extra_args=["completion", "awesomeshell"]) - assert exit_code == 1 - assert self.log_contains("Unknown shell awesomeshell") diff --git a/libbeat/tests/system/test_cmd_export_config.py b/libbeat/tests/system/test_cmd_export_config.py deleted file mode 100644 index f5c4de356d2..00000000000 --- a/libbeat/tests/system/test_cmd_export_config.py +++ /dev/null @@ -1,40 +0,0 @@ -import os -from base import BaseTest - -INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False) - - -class TestCommandExportConfig(BaseTest): - """ - Test beat command `export config` - """ - - def setUp(self): - super(TestCommandExportConfig, self).setUp() - - self.config = "libbeat.yml" - self.output = os.path.join(self.working_dir, self.config) - - def test_default(self): - """ - Test export config works - """ - self.render_config_template(self.beat_name, self.output, file_name='some-file') - exit_code = self.run_beat(extra_args=["export", "config"], config=self.config) - - assert exit_code == 0 - assert self.log_contains("filename: mockbeat") - assert self.log_contains("name: some-file") - - def test_config_environment_variable(self): - """ - Test export config works but doesn"t expose environment variable. 
- """ - self.render_config_template(self.beat_name, self.output, - file_name="${FILE_NAME}") - exit_code = self.run_beat(extra_args=["export", "config"], config=self.config, - env={'FILE_NAME': 'some-file'}) - - assert exit_code == 0 - assert self.log_contains("filename: mockbeat") - assert self.log_contains("name: ${FILE_NAME}") diff --git a/libbeat/tests/system/test_cmd_keystore.py b/libbeat/tests/system/test_cmd_keystore.py deleted file mode 100644 index f9f1e4d5611..00000000000 --- a/libbeat/tests/system/test_cmd_keystore.py +++ /dev/null @@ -1,144 +0,0 @@ -from os import path -import os -import hashlib - -from keystore import KeystoreBase - - -class TestCommandKeystore(KeystoreBase): - """ - Test keystore subcommand - - """ - - def setUp(self): - super(TestCommandKeystore, self).setUp() - - self.keystore_path = self.working_dir + "/data/keystore" - - self.render_config_template(keystore_path=self.keystore_path) - - if path.exists(self.keystore_path): - os.Remove(self.keystore_path) - - def test_keystore_create(self): - """ - test keystore create command - """ - exit_code = self.run_beat(extra_args=["keystore", "create"]) - - assert exit_code == 0 - assert path.exists(self.keystore_path) - - def test_keystore_create_force(self): - """ - Allow to override - """ - - self.run_beat(extra_args=["keystore", "create"]) - - assert path.exists(self.keystore_path) - digest_before = hashlib.sha256( - open(self.keystore_path, 'rb').read()).digest() - - exit_code = self.run_beat(extra_args=["keystore", "create", "--force"]) - digest_after = hashlib.sha256( - open(self.keystore_path, 'rb').read()).digest() - - assert exit_code == 0 - assert digest_before != digest_after - - def test_keystore_remove_no_key_no_keystore(self): - """ - Remove a key that doesn't exist when the keystore doesn't exist - """ - exit_code = self.run_beat(extra_args=["keystore", "remove", "mykey"]) - assert exit_code == 1 - assert "The filebeat keystore doesn't exist." 
- - def test_keystore_remove_non_existing_key(self): - """ - Remove a key that doesn't exist in the keystore - """ - self.run_beat(extra_args=["keystore", "create"]) - - exit_code = self.run_beat(extra_args=["keystore", "remove", "mykey"]) - assert exit_code == 1 - - def test_keystore_remove_existing_key(self): - """ - Remove an key present in the keystore - """ - self.run_beat(extra_args=["keystore", "create"]) - - self.add_secret("mykey") - exit_code = self.run_beat(extra_args=["keystore", "remove", "mykey"]) - - assert exit_code == 0 - - def test_keystore_remove_multiples_existing_keys(self): - """ - Remove an key present in the keystore - """ - - self.run_beat(extra_args=["keystore", "create"]) - - self.add_secret("willnotdelete") - self.add_secret("myawesomekey") - self.add_secret("mysuperkey") - - exit_code = self.run_beat( - extra_args=["keystore", "remove", "mysuperkey", "myawesomekey"]) - - assert exit_code == 0 - - exit_code = self.run_beat(extra_args=["keystore", "list"]) - - assert exit_code == 0 - - def test_keystore_list(self): - """ - list the available keys - """ - - self.run_beat(extra_args=["keystore", "create"]) - - self.add_secret("willnotdelete") - self.add_secret("myawesomekey") - self.add_secret("mysuperkey") - - exit_code = self.run_beat(extra_args=["keystore", "list"]) - - assert exit_code == 0 - - assert self.log_contains("willnotdelete") - assert self.log_contains("myawesomekey") - assert self.log_contains("mysuperkey") - - def test_keystore_list_keys_on_an_empty_keystore(self): - """ - List keys on an empty keystore should not return anything - """ - exit_code = self.run_beat(extra_args=["keystore", "list"]) - assert exit_code == 0 - - def test_keystore_add_secret_from_stdin(self): - """ - Add a secret to the store using stdin - """ - self.run_beat(extra_args=["keystore", "create"]) - exit_code = self.add_secret("willnotdelete") - - assert exit_code == 0 - - def test_keystore_update_force(self): - """ - Update an existing key using the --force flag - """ - self.run_beat(extra_args=["keystore", "create"]) - - self.add_secret("superkey") - - exit_code = self.add_secret("mysuperkey", "hello", True) - - assert exit_code == 0 diff --git a/libbeat/tests/system/test_cmd_test.py b/libbeat/tests/system/test_cmd_test.py deleted file mode 100644 index 944d7791fb6..00000000000 --- a/libbeat/tests/system/test_cmd_test.py +++ /dev/null @@ -1,84 +0,0 @@ -import os -import logging -import unittest -import pytest -from base import BaseTest - - -INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False) - - -class TestCommandTest(BaseTest): - """ - Test beat subcommands - """ - - def test_config(self): - """ - Test test config command - """ - self.render_config_template("mockbeat", - os.path.join(self.working_dir, "libbeat.yml")) - - exit_code = self.run_beat( - logging_args=[], - extra_args=["test", "config"], - config="libbeat.yml") - - assert exit_code == 0 - assert self.log_contains("Config OK") - - def test_bad_config(self): - """ - Test test config command with bad config - """ - exit_code = self.run_beat( - logging_args=[], - extra_args=["test", "config"], - config="libbeat-missing.yml") - - assert exit_code == 1 - assert self.log_contains("Config OK") is False - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_output(self): - """ - Test test output works - """ - - self.es_client() - logging.getLogger("elasticsearch").setLevel(logging.ERROR) - - self.render_config_template("mockbeat", - 
os.path.join(self.working_dir, "mockbeat.yml"), - elasticsearch=self.get_elasticsearch_template_config()) - exit_code = self.run_beat( - extra_args=["test", "output"], - config="mockbeat.yml") - - assert exit_code == 0 - assert self.log_contains('parse url... OK') - assert self.log_contains('TLS... WARN secure connection disabled') - assert self.log_contains('talk to server... OK') - - @unittest.skipIf(not INTEGRATION_TESTS, "integration test") - def test_wrong_output(self): - """ - Test test wrong output works - """ - self.render_config_template("mockbeat", - os.path.join(self.working_dir, - "mockbeat.yml"), - elasticsearch={ - "host": 'badhost:9200', - "user": 'admin', - "pass": 'testing' - }) - exit_code = self.run_beat( - extra_args=["test", "output"], - config="mockbeat.yml") - - assert exit_code == 1 - assert self.log_contains('parse url... OK') - assert self.log_contains('dns lookup... ERROR') diff --git a/libbeat/tests/system/test_cmd_version.py b/libbeat/tests/system/test_cmd_version.py deleted file mode 100644 index ace84b99062..00000000000 --- a/libbeat/tests/system/test_cmd_version.py +++ /dev/null @@ -1,34 +0,0 @@ -from base import BaseTest - -import logging -import os - - -INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False) - - -class TestCommandVersion(BaseTest): - """ - Test beat subcommands - """ - - def setUp(self): - super(BaseTest, self).setUp() - - self.elasticsearch_url = self.get_elasticsearch_url() - print("Using elasticsearch: {}".format(self.elasticsearch_url)) - self.es = self.get_elasticsearch_instance(url=self.elasticsearch_url, user='beats') - logging.getLogger("urllib3").setLevel(logging.WARNING) - logging.getLogger("elasticsearch").setLevel(logging.ERROR) - - def test_version(self): - """ - Test version command - """ - exit_code = self.run_beat( - extra_args=["version"], logging_args=["-v", "-d", "*"]) - assert exit_code == 0 - - assert self.log_contains("mockbeat") - assert self.log_contains("version") - assert self.log_contains("9.9.9") diff --git a/libbeat/tests/system/test_dashboard.py b/libbeat/tests/system/test_dashboard.py deleted file mode 100644 index 1341501bd61..00000000000 --- a/libbeat/tests/system/test_dashboard.py +++ /dev/null @@ -1,219 +0,0 @@ -import os -import os.path -import pytest -import json -import re -import requests -import semver -import shutil -import subprocess -import unittest - -from base import BaseTest - -INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False) - - -class Test(BaseTest): - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_load_without_dashboard(self): - """ - Test loading without dashboards - """ - self.render_config_template() - beat = self.start_beat( - logging_args=["-e", "-d", "*"], - extra_args=["setup", - "--dashboards", - "-E", "setup.dashboards.file=" + - os.path.join(self.beat_path, "tests", "files", "testbeat-no-dashboards.zip"), - "-E", "setup.dashboards.beat=testbeat", - "-E", "setup.kibana.protocol=http", - "-E", "setup.kibana.host=" + self.get_kibana_host(), - "-E", "setup.kibana.port=" + self.get_kibana_port(), - "-E", "setup.kibana.username=beats", - "-E", "setup.kibana.password=testing", - "-E", "output.elasticsearch.hosts=['" + self.get_host() + "']", - "-E", "output.elasticsearch.username=admin", - "-E", "output.elasticsearch.password=testing", - "-E", "output.file.enabled=false"] - ) - - beat.check_wait(exit_code=0) - - assert self.log_contains("Skipping loading dashboards") - - 
@unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_load_dashboard(self): - """ - Test loading dashboards - """ - self.render_config_template() - beat = self.start_beat( - logging_args=["-e", "-d", "*"], - extra_args=["setup", - "--dashboards", - "-E", "setup.dashboards.file=" + - os.path.join(self.beat_path, "tests", "files", "testbeat-dashboards.zip"), - "-E", "setup.dashboards.beat=testbeat", - "-E", "setup.kibana.protocol=http", - "-E", "setup.kibana.host=" + self.get_kibana_host(), - "-E", "setup.kibana.port=" + self.get_kibana_port(), - "-E", "setup.kibana.username=beats", - "-E", "setup.kibana.password=testing", - "-E", "output.elasticsearch.hosts=['" + self.get_host() + "']", - "-E", "output.elasticsearch.username=admin", - "-E", "output.elasticsearch.password=testing", - "-E", "output.file.enabled=false"] - ) - beat.check_wait(exit_code=0) - - assert self.log_contains("Kibana dashboards successfully loaded") is True - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_load_only_index_patterns(self): - """ - Test loading dashboards - """ - self.render_config_template() - beat = self.start_beat( - logging_args=["-e", "-d", "*"], - extra_args=["setup", - "--dashboards", - "-E", "setup.dashboards.file=" + - os.path.join(self.beat_path, "tests", "files", "testbeat-dashboards.zip"), - "-E", "setup.dashboards.beat=testbeat", - "-E", "setup.dashboards.only_index=true", - "-E", "setup.kibana.protocol=http", - "-E", "setup.kibana.host=" + self.get_kibana_host(), - "-E", "setup.kibana.port=" + self.get_kibana_port(), - "-E", "setup.kibana.username=beats", - "-E", "setup.kibana.password=testing", - "-E", "output.elasticsearch.hosts=['" + self.get_host() + "']", - "-E", "output.elasticsearch.username=admin", - "-E", "output.elasticsearch.password=testing", - "-E", "output.file.enabled=false"] - ) - - beat.check_wait(exit_code=0) - - assert self.log_contains("Kibana dashboards successfully loaded") is True - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_export_dashboard_cmd_export_dashboard_by_id(self): - """ - Test testbeat export dashboard can export dashboards - """ - self.render_config_template() - self.test_load_dashboard() - beat = self.start_beat( - logging_args=["-e", "-d", "*"], - extra_args=["export", - "dashboard", - "-E", "setup.kibana.protocol=http", - "-E", "setup.kibana.host=" + self.get_kibana_host(), - "-E", "setup.kibana.port=" + self.get_kibana_port(), - "-E", "setup.kibana.username=beats", - "-E", "setup.kibana.password=testing", - "-id", "Metricbeat-system-overview", - "-folder", "system-overview"] - ) - - beat.check_wait(exit_code=0) - self._check_if_dashboard_exported("system-overview") - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_export_dashboard_cmd_export_dashboard_by_id_unknown_id(self): - """ - Test testbeat export dashboard fails gracefully when dashboard with unknown ID is requested - """ - self.render_config_template() - beat = self.start_beat( - logging_args=["-e", "-d", "*"], - extra_args=["export", - "dashboard", - "-E", "setup.kibana.protocol=http", - "-E", "setup.kibana.host=" + self.get_kibana_host(), - "-E", "setup.kibana.port=" + self.get_kibana_port(), - "-E", "setup.kibana.username=beats", - "-E", "setup.kibana.password=testing", - "-id", "No-such-dashboard", - "-folder", "system-overview"] - ) - - 
beat.check_wait(exit_code=1) - - expected_error = re.compile("error exporting dashboard:.*not found", re.IGNORECASE) - assert self.log_contains(expected_error) - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_dev_tool_export_dashboard_by_id(self): - """ - Test dev-tools/cmd/dashboards exports dashboard and removes unsupported characters - """ - - self.test_load_dashboard() - - folder_name = "system-overview" - path = os.path.normpath(self.beat_path + "/../dev-tools/cmd/dashboards/export_dashboards.go") - command = path + " -kibana http://" + self.get_kibana_host() + ":" + self.get_kibana_port() - command = "go run " + command + " -dashboard Metricbeat-system-overview -folder " + folder_name - - p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - _, stderr = p.communicate() - assert p.returncode == 0, stderr - - self._check_if_dashboard_exported(folder_name) - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_dev_tool_export_dashboard_by_id_unknown_id(self): - """ - Test dev-tools/cmd/dashboards fails gracefully when dashboard with unknown ID is requested - """ - - path = os.path.normpath(self.beat_path + "/../dev-tools/cmd/dashboards/export_dashboards.go") - command = path + " -kibana http://" + self.get_kibana_host() + ":" + self.get_kibana_port() - command = "go run " + command + " -dashboard No-such-dashboard" - - p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - _, stderr = p.communicate() - - assert p.returncode != 0, stderr - - def _check_if_dashboard_exported(self, folder_name): - kibana_semver = semver.VersionInfo.parse(self.get_version()) - dashboard_folder = os.path.join(folder_name, "_meta", "kibana", str(kibana_semver.major), "dashboard") - assert os.path.isdir(dashboard_folder) - - with open(os.path.join(dashboard_folder, "Metricbeat-system-overview.json")) as f: - content = f.read() - assert "Metricbeat-system-overview" in content - - shutil.rmtree(folder_name) - - def get_host(self): - return os.getenv('ES_HOST', 'localhost') + ':' + os.getenv('ES_PORT', '9200') - - def get_kibana_host(self): - return os.getenv('KIBANA_HOST', 'localhost') - - def get_kibana_port(self): - return os.getenv('KIBANA_PORT', '5601') - - def get_version(self): - url = "http://" + self.get_kibana_host() + ":" + self.get_kibana_port() + \ - "/api/status" - - r = requests.get(url, auth=("beats", "testing")) - body = r.json() - version = body["version"]["number"] - - return version diff --git a/libbeat/tests/system/test_http.py b/libbeat/tests/system/test_http.py deleted file mode 100644 index 76e5b40b181..00000000000 --- a/libbeat/tests/system/test_http.py +++ /dev/null @@ -1,56 +0,0 @@ -from base import BaseTest - -import requests -import json - - -class Test(BaseTest): - def setUp(self): - super(BaseTest, self).setUp() - self.render_config_template() - self.proc = self.start_beat(extra_args=["-E", "http.enabled=true"]) - self.wait_until(lambda: self.log_contains("Starting stats endpoint")) - - def tearDown(self): - super(BaseTest, self).tearDown() - # Wait till the beat is completely started so it can handle SIGTERM - self.wait_until(lambda: self.log_contains("mockbeat start running.")) - self.proc.check_kill_and_wait() - - def test_root(self): - """ - Test / http endpoint - """ - r = requests.get("http://localhost:5066") - assert r.status_code == 200 - - data = json.loads(r.content.decode('utf_8')) - - 
assert data["beat"] == "mockbeat" - assert data["version"] == "9.9.9" - - def test_stats(self): - """ - Test /stats http endpoint - """ - r = requests.get("http://localhost:5066/stats") - assert r.status_code == 200 - - data = json.loads(r.content.decode('utf_8')) - - # Test one data point - assert data["libbeat"]["config"]["scans"] == 0 - - def test_error(self): - """ - Test not existing http endpoint - """ - r = requests.get("http://localhost:5066/not-exist") - assert r.status_code == 404 - - def test_pprof_disabled(self): - """ - Test /debug/pprof/ http endpoint - """ - r = requests.get("http://localhost:5066/debug/pprof/") - assert r.status_code == 404 diff --git a/libbeat/tests/system/test_logging.py b/libbeat/tests/system/test_logging.py deleted file mode 100644 index 4380e25b55c..00000000000 --- a/libbeat/tests/system/test_logging.py +++ /dev/null @@ -1,49 +0,0 @@ -from base import BaseTest - -import re - -ecs_version_log = "\"ecs.version\":" -ecs_timestamp_log = "\"@timestamp\":" -ecs_message_log = "\"message\":" -ecs_log_level_log = "\"log.level\":" - - -class TestLogging(BaseTest): - - def run_beat_with_args(self, msg, logging_args=[], extra_args=[]): - self.render_config_template( - console={"pretty": "false"} - ) - proc = self.start_beat(logging_args=logging_args, extra_args=extra_args) - self.wait_until(lambda: self.log_contains(msg), - max_timeout=2) - proc.check_kill_and_wait() - - def assert_contains_ecs_log(self, logfile=None): - assert self.log_contains(ecs_version_log, logfile=logfile) - assert self.log_contains(ecs_timestamp_log, logfile=logfile) - assert self.log_contains(ecs_message_log, logfile=logfile) - assert self.log_contains(ecs_log_level_log, logfile=logfile) - - def test_console_ecs(self): - """ - logs to console with ECS format - """ - self.run_beat_with_args("mockbeat start running", - logging_args=["-e"]) - self.assert_contains_ecs_log() - - def test_file_default(self): - """ - logs to file with default format - """ - self.run_beat_with_args("Mockbeat is alive!", - logging_args=[]) - self.assert_contains_ecs_log(logfile="logs/mockbeat-"+self.today+".ndjson") - - def test_file_ecs(self): - """ - logs to file with ECS format - """ - self.run_beat_with_args("Mockbeat is alive!") - self.assert_contains_ecs_log(logfile="logs/mockbeat-"+self.today+".ndjson") diff --git a/libbeat/tests/system/test_meta.py b/libbeat/tests/system/test_meta.py deleted file mode 100644 index f816056a109..00000000000 --- a/libbeat/tests/system/test_meta.py +++ /dev/null @@ -1,37 +0,0 @@ -from base import BaseTest - -import os -import stat -import unittest -from beat.beat import INTEGRATION_TESTS - - -class TestMetaFile(BaseTest): - """ - Test meta file - """ - - def setUp(self): - super(BaseTest, self).setUp() - - self.meta_file_path = os.path.join(self.working_dir, "data", "meta.json") - - self.render_config_template() - proc = self.start_beat() - self.wait_until(lambda: self.log_contains("mockbeat start running.")) - proc.check_kill_and_wait() - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - def test_is_created(self): - """ - Test that the meta file is created - """ - self.assertTrue(os.path.exists(self.meta_file_path)) - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - def test_has_correct_perms(self): - """ - Test that the meta file has correct permissions - """ - perms = stat.S_IMODE(os.lstat(self.meta_file_path).st_mode) - self.assertEqual(perms, 0o600) diff --git a/libbeat/tests/system/test_template.py b/libbeat/tests/system/test_template.py 
deleted file mode 100644 index 5e1ab7ca909..00000000000 --- a/libbeat/tests/system/test_template.py +++ /dev/null @@ -1,382 +0,0 @@ -import json -import logging -import os -import pytest -import shutil -import unittest - -from base import BaseTest -from idxmgmt import IdxMgmt - -INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False) - - -class Test(BaseTest): - - def test_index_modified(self): - """ - Test that beat stops in case elasticsearch index is modified and pattern not - """ - self.render_config_template( - elasticsearch={"index": "test"}, - ) - - exit_code = self.run_beat() - - assert exit_code == 1 - assert self.log_contains( - "setup.template.name and setup.template.pattern have to be set if index name is modified") - - def test_index_not_modified(self): - """ - Test that beat starts running if elasticsearch output is set - """ - self.render_config_template( - elasticsearch=self.get_elasticsearch_template_config(), - ) - - proc = self.start_beat() - self.wait_until(lambda: self.log_contains("mockbeat start running.")) - proc.check_kill_and_wait() - - def test_index_modified_no_pattern(self): - """ - Test that beat stops in case elasticsearch index is modified and pattern not - """ - self.render_config_template( - elasticsearch={"index": "test"}, - es_template_name="test", - ) - - exit_code = self.run_beat() - - assert exit_code == 1 - assert self.log_contains( - "setup.template.name and setup.template.pattern have to be set if index name is modified") - - def test_index_modified_no_name(self): - """ - Test that beat stops in case elasticsearch index is modified and name not - """ - self.render_config_template( - elasticsearch={"index": "test"}, - es_template_pattern="test", - ) - - exit_code = self.run_beat() - - assert exit_code == 1 - assert self.log_contains( - "setup.template.name and setup.template.pattern have to be set if index name is modified") - - def test_index_with_pattern_name(self): - """ - Test that beat starts running if elasticsearch output with modified index and pattern and name are set - """ - self.render_config_template( - elasticsearch=self.get_elasticsearch_template_config(), - es_template_name="test", - es_template_pattern="test-*", - ) - - proc = self.start_beat() - self.wait_until(lambda: self.log_contains("mockbeat start running.")) - proc.check_kill_and_wait() - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_json_template(self): - """ - Test loading of json based template - """ - - template_name = "bla" - es = self.es_client() - self.copy_files(["template.json"]) - path = os.path.join(self.working_dir, "template.json") - print(path) - - self.render_config_template( - elasticsearch=self.get_elasticsearch_template_config(), - template_overwrite="true", - template_json_enabled="true", - template_json_path=path, - template_json_name=template_name, - ) - - proc = self.start_beat() - self.wait_until(lambda: self.log_contains("mockbeat start running.")) - self.wait_until(lambda: self.log_contains("Loading json template from file")) - self.wait_until(lambda: self.log_contains('Template with name \\\"bla\\\" loaded.')) - proc.check_kill_and_wait() - - result = es.transport.perform_request('GET', '/_index_template/' + template_name) - assert len(result) == 1 - - def get_host(self): - return os.getenv('ES_HOST', 'localhost') + ':' + os.getenv('ES_PORT', '9200') - - -class TestRunTemplate(BaseTest): - """ - Test run cmd with focus on template setup - """ - - def setUp(self): - 
super(TestRunTemplate, self).setUp() - # auto-derived default settings, if nothing else is set - self.data_stream = self.beat_name + "-9.9.9" - - self.es = self.es_client() - self.idxmgmt = IdxMgmt(self.es, self.data_stream) - self.idxmgmt.delete(data_streams=[self.data_stream]) - - def tearDown(self): - self.idxmgmt.delete(data_streams=[self.data_stream]) - - def render_config(self, **kwargs): - self.render_config_template( - elasticsearch=self.get_elasticsearch_template_config(), - **kwargs - ) - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_template_default(self): - """ - Test run cmd with default settings for template - """ - self.render_config() - proc = self.start_beat() - self.wait_until(lambda: self.log_contains("mockbeat start running.")) - self.wait_until(lambda: self.log_contains('Template with name \\\"mockbeat-9.9.9\\\" loaded.')) - self.wait_until(lambda: self.log_contains("PublishEvents: 1 events have been published")) - proc.check_kill_and_wait() - - self.idxmgmt.assert_index_template_loaded(self.data_stream) - self.idxmgmt.assert_docs_written_to_data_stream(self.data_stream) - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_template_disabled(self): - """ - Test run cmd does not load template when disabled in config - """ - self.render_config() - proc = self.start_beat(extra_args=["-E", "setup.template.enabled=false"]) - self.wait_until(lambda: self.log_contains("mockbeat start running.")) - self.wait_until(lambda: self.log_contains("PublishEvents: 1 events have been published")) - proc.check_kill_and_wait() - - self.idxmgmt.assert_index_template_not_loaded(self.data_stream) - - -class TestCommandSetupTemplate(BaseTest): - """ - Test beat command `setup` with focus on template - """ - - def setUp(self): - super(TestCommandSetupTemplate, self).setUp() - - # auto-derived default settings, if nothing else is set - self.setupCmd = "--index-management" - self.data_stream = self.beat_name + "-9.9.9" - self.policy_name = self.beat_name - - self.es = self.es_client() - self.idxmgmt = IdxMgmt(self.es, self.data_stream) - self.idxmgmt.delete(indices=[self.data_stream], policies=[self.policy_name]) - logging.getLogger("urllib3").setLevel(logging.WARNING) - logging.getLogger("elasticsearch").setLevel(logging.ERROR) - - def tearDown(self): - self.idxmgmt.delete(indices=[self.data_stream], policies=[self.policy_name]) - - def render_config(self, **kwargs): - self.render_config_template( - elasticsearch=self.get_elasticsearch_template_config(), - **kwargs - ) - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_setup(self): - """ - Test setup cmd with template and ilm-policy subcommands - """ - self.render_config() - exit_code = self.run_beat(logging_args=["-v", "-d", "*"], - extra_args=["setup", self.setupCmd]) - - assert exit_code == 0 - self.idxmgmt.assert_index_template_loaded(self.data_stream) - self.idxmgmt.assert_policy_created(self.policy_name) - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_setup_template_default(self): - """ - Test template setup with default config - """ - self.render_config() - exit_code = self.run_beat(logging_args=["-v", "-d", "*"], - extra_args=["setup", self.setupCmd]) - - assert exit_code == 0 - self.idxmgmt.assert_index_template_loaded(self.data_stream) - self.idxmgmt.assert_index_template_index_pattern(self.data_stream, 
[self.data_stream]) - - self.idxmgmt.assert_policy_created(self.policy_name) - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_setup_template_disabled(self): - """ - Test template setup when template loading is disabled - """ - self.render_config() - exit_code = self.run_beat(logging_args=["-v", "-d", "*"], - extra_args=["setup", self.setupCmd, - "-E", "setup.template.enabled=false"]) - - assert exit_code == 0 - self.idxmgmt.assert_index_template_not_loaded(self.data_stream) - - self.idxmgmt.assert_policy_created(self.policy_name) - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_setup_template_with_opts(self): - """ - Test template setup with config options - """ - self.render_config() - exit_code = self.run_beat(logging_args=["-v", "-d", "*"], - extra_args=["setup", self.setupCmd, - "-E", "setup.ilm.enabled=false", - "-E", "setup.template.settings.index.number_of_shards=2"]) - - assert exit_code == 0 - self.idxmgmt.assert_index_template_loaded(self.data_stream) - - # check that settings are overwritten - resp = self.es.transport.perform_request('GET', '/_index_template/' + self.data_stream) - found = False - for index_template in resp["index_templates"]: - if self.data_stream == index_template["name"]: - found = True - index = index_template["index_template"]["template"]["settings"]["index"] - assert index["number_of_shards"] == "2", index["number_of_shards"] - assert found - - @unittest.skipUnless(INTEGRATION_TESTS, "integration test") - @pytest.mark.tag('integration') - def test_template_created_on_ilm_policy_created(self): - """ - Test template setup overwrites template when new ilm policy is created - """ - - self.render_config() - exit_code = self.run_beat(logging_args=["-v", "-d", "*"], - extra_args=["setup", self.setupCmd, - "-E", "setup.ilm.enabled=false"]) - assert exit_code == 0 - self.idxmgmt.assert_index_template_loaded(self.data_stream) - self.idxmgmt.assert_policy_not_created(self.policy_name) - - # ensure ilm policy is created, triggering overwriting existing template - exit_code = self.run_beat(extra_args=["setup", self.setupCmd, - "-E", "setup.template.overwrite=false", - "-E", "setup.template.settings.index.number_of_shards=2"]) - assert exit_code == 0 - self.idxmgmt.assert_policy_created(self.policy_name) - - class TestCommandExportTemplate(BaseTest): - """ - Test beat command `export template` - """ - - def setUp(self): - super(TestCommandExportTemplate, self).setUp() - - self.config = "libbeat.yml" - self.output = os.path.join(self.working_dir, self.config) - shutil.copy(os.path.join(self.beat_path, "fields.yml"), self.output) - self.template_name = self.beat_name + "-9.9.9" - - def assert_log_contains_template(self, index_pattern): - assert self.log_contains('Loaded index template') - assert self.log_contains(index_pattern) - - def test_default(self): - """ - Test export template works - """ - self.render_config_template(self.beat_name, self.output, - fields=self.output) - exit_code = self.run_beat( - extra_args=["export", "template"], - config=self.config) - - assert exit_code == 0 - self.assert_log_contains_template(self.template_name) - - def test_load_disabled(self): - """ - Test template also exported when disabled in config - """ - self.render_config_template(self.beat_name, self.output, - fields=self.output) - exit_code = self.run_beat( - extra_args=["export", "template", "-E", "setup.template.enabled=false"], - config=self.config) - - assert exit_code == 0
- self.assert_log_contains_template(self.template_name) - - def test_export_to_file_absolute_path(self): - """ - Test export template to file with absolute file path - """ - self.render_config_template(self.beat_name, self.output, - fields=self.output) - - base_path = os.path.abspath(os.path.join(self.beat_path, os.path.dirname(__file__), "export")) - exit_code = self.run_beat( - extra_args=["export", "template", "--dir=" + base_path], - config=self.config) - - assert exit_code == 0 - - file = os.path.join(base_path, "template", self.template_name + '.json') - with open(file) as f: - template = json.load(f) - assert 'index_patterns' in template - assert template['index_patterns'] == [self.template_name], template - - os.remove(file) - - def test_export_to_file_relative_path(self): - """ - Test export template to file with relative file path - """ - self.render_config_template(self.beat_name, self.output, - fields=self.output) - - path = os.path.join(os.path.dirname(__file__), "export") - exit_code = self.run_beat( - extra_args=["export", "template", "--dir=" + path], - config=self.config) - - assert exit_code == 0 - - base_path = os.path.abspath(os.path.join(self.beat_path, os.path.dirname(__file__), "export")) - file = os.path.join(base_path, "template", self.template_name + '.json') - with open(file) as f: - template = json.load(f) - assert 'index_patterns' in template - assert template['index_patterns'] == [self.template_name], template - - os.remove(file) diff --git a/libbeat/version/version.go b/libbeat/version/version.go index 7ba8762ee6c..cfa3abd3aef 100644 --- a/libbeat/version/version.go +++ b/libbeat/version/version.go @@ -18,4 +18,4 @@ // Code generated by dev-tools/set_version package version -const defaultBeatVersion = "8.9.0" +const defaultBeatVersion = "8.10.0" diff --git a/magefile.go b/magefile.go index 82c45217562..0e47301e7cc 100644 --- a/magefile.go +++ b/magefile.go @@ -26,7 +26,6 @@ import ( "path/filepath" "github.com/magefile/mage/mg" - "github.com/pkg/errors" "go.uber.org/multierr" devtools "github.com/elastic/beats/v7/dev-tools/mage" @@ -88,7 +87,7 @@ func PackageBeatDashboards() error { } else if _, err := os.Stat(legacyDir); err == nil { spec.Files[beatName] = devtools.PackageFile{Source: legacyDir} } else { - return errors.Errorf("no dashboards found for %v", beatDir) + return fmt.Errorf("no dashboards found for %v", beatDir) } } diff --git a/metricbeat/Dockerfile b/metricbeat/Dockerfile index e7ebcc93371..97ca1600c21 100644 --- a/metricbeat/Dockerfile +++ b/metricbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19.10 +FROM golang:1.19.12 RUN \ apt update \ @@ -13,7 +13,7 @@ RUN \ && rm -rf /var/lib/apt/lists/* # Use a virtualenv to avoid the PEP668 "externally managed environment" error caused by conflicts -# with the system Python installation. golang:1.19.10 uses Debian 12 which now enforces PEP668. +# with the system Python installation. golang:1.19.12 uses Debian 12 which now enforces PEP668.
ENV VIRTUAL_ENV=/opt/venv RUN python3 -m venv $VIRTUAL_ENV ENV PATH="$VIRTUAL_ENV/bin:$PATH" @@ -21,7 +21,7 @@ ENV PATH="$VIRTUAL_ENV/bin:$PATH" RUN pip3 install --upgrade pip==20.1.1 RUN pip3 install --upgrade docker-compose==1.23.2 RUN pip3 install --upgrade setuptools==47.3.2 -RUN pip3 install --upgrade PyYAML==6.0.0 +RUN pip3 install --upgrade PyYAML==5.3.1 # Oracle instant client RUN cd /usr/lib \ diff --git a/metricbeat/cmd/modules.go b/metricbeat/cmd/modules.go index f137a91c37b..75cd807cb8b 100644 --- a/metricbeat/cmd/modules.go +++ b/metricbeat/cmd/modules.go @@ -18,10 +18,9 @@ package cmd import ( + "fmt" "strings" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd" @@ -33,16 +32,16 @@ func BuildModulesManager(beat *beat.Beat) (cmd.ModulesManager, error) { glob, err := config.String("config.modules.path", -1) if err != nil { - return nil, errors.Errorf("modules management requires 'metricbeat.config.modules.path' setting") + return nil, fmt.Errorf("modules management requires 'metricbeat.config.modules.path' setting") } if !strings.HasSuffix(glob, "*.yml") { - return nil, errors.Errorf("wrong settings for config.modules.path, it is expected to end with *.yml. Got: %s", glob) + return nil, fmt.Errorf("wrong settings for config.modules.path, it is expected to end with *.yml. Got: %s", glob) } modulesManager, err := cfgfile.NewGlobManager(glob, ".yml", ".disabled") if err != nil { - return nil, errors.Wrap(err, "initialization error") + return nil, fmt.Errorf("initialization error: %w", err) } return modulesManager, nil } diff --git a/metricbeat/docker-compose.yml b/metricbeat/docker-compose.yml index 8e2f2af0109..ddbab5b80c1 100644 --- a/metricbeat/docker-compose.yml +++ b/metricbeat/docker-compose.yml @@ -17,11 +17,11 @@ services: # Used by base tests elasticsearch: - image: docker.elastic.co/integrations-ci/beats-elasticsearch:${ELASTICSEARCH_VERSION:-8.7.0}-1 + image: docker.elastic.co/integrations-ci/beats-elasticsearch:${ELASTICSEARCH_VERSION:-8.8.1}-1 build: context: ./module/elasticsearch/_meta args: - ELASTICSEARCH_VERSION: ${ELASTICSEARCH_VERSION:-8.7.0} + ELASTICSEARCH_VERSION: ${ELASTICSEARCH_VERSION:-8.8.1} environment: - "ES_JAVA_OPTS=-Xms256m -Xmx256m" - "transport.host=127.0.0.1" @@ -38,11 +38,11 @@ services: # Used by base tests kibana: - image: docker.elastic.co/integrations-ci/beats-kibana:${KIBANA_VERSION:-8.7.0}-1 + image: docker.elastic.co/integrations-ci/beats-kibana:${KIBANA_VERSION:-8.8.1}-1 build: context: ./module/kibana/_meta args: - KIBANA_VERSION: ${KIBANA_VERSION:-8.7.0} + KIBANA_VERSION: ${KIBANA_VERSION:-8.8.1} healthcheck: test: ["CMD-SHELL", "curl -u beats:testing -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 @@ -53,11 +53,11 @@ services: # Used by base tests metricbeat: - image: docker.elastic.co/integrations-ci/beats-metricbeat:${BEAT_VERSION:-8.7.0}-1 + image: docker.elastic.co/integrations-ci/beats-metricbeat:${BEAT_VERSION:-8.8.1}-1 build: context: ./module/beat/_meta args: - BEAT_VERSION: ${BEAT_VERSION:-8.7.0} + BEAT_VERSION: ${BEAT_VERSION:-8.8.1} command: '-e' ports: - 5066:5066 diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index 17f608f713b..6e7e895a56d 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -45789,6 +45789,26 @@ type: boolean -- + +*`kubernetes.deployment.status.available`*:: ++ +-- 
+Deployment Available Condition status (true, false or unknown) + + +type: keyword + +-- + +*`kubernetes.deployment.status.progressing`*:: ++ +-- +Deployment Progressing Condition status (true, false or unknown) + +type: keyword + +-- + [float] === replicas diff --git a/metricbeat/docs/running-on-docker.asciidoc b/metricbeat/docs/running-on-docker.asciidoc index cc7c8c5b650..72cfed9757d 100644 --- a/metricbeat/docs/running-on-docker.asciidoc +++ b/metricbeat/docs/running-on-docker.asciidoc @@ -18,7 +18,9 @@ docker run \ --mount type=bind,source=/proc,target=/hostfs/proc,readonly \ <1> --mount type=bind,source=/sys/fs/cgroup,target=/hostfs/sys/fs/cgroup,readonly \ <2> --mount type=bind,source=/,target=/hostfs,readonly \ <3> - --net=host \ <4> + --mount type=bind,source=/var/run/dbus/system_bus_socket,target=/hostfs/var/run/dbus/system_bus_socket,readonly \ <4> + --env DBUS_SYSTEM_BUS_ADDRESS='unix:path=/hostfs/var/run/dbus/system_bus_socket' \ <4> + --net=host \ <5> {dockerimage} -e -system.hostfs=/hostfs ---- @@ -36,7 +38,9 @@ mounted inside the directory specified by the `hostfs` config value. <3> If you want to be able to monitor filesystems from the host by using the <>, then those filesystems need to be mounted inside of the container. They can be mounted at any location. -<4> The <> uses data from `/proc/net/dev`, or +<4> The <> and <> +both require access to dbus. Mount the dbus socket and set the `DBUS_SYSTEM_BUS_ADDRESS` environment variable to the mounted system socket path. +<5> The <> uses data from `/proc/net/dev`, or `/hostfs/proc/net/dev` when using `hostfs=/hostfs`. The only way to make this file contain the host's network devices is to use the `--net=host` flag. This is due to Linux namespacing; simply bind mounting the host's `/proc` diff --git a/metricbeat/docs/troubleshooting.asciidoc b/metricbeat/docs/troubleshooting.asciidoc index 823ad0d3357..f1876af0f27 100644 --- a/metricbeat/docs/troubleshooting.asciidoc +++ b/metricbeat/docs/troubleshooting.asciidoc @@ -7,6 +7,7 @@ If you have issues installing or running {beatname_uc}, read the following tips: * <> * <> +* <> * <> //sets block macro for getting-help.asciidoc included in next section @@ -24,3 +25,15 @@ include::{libbeat-dir}/getting-help.asciidoc[] == Debug include::{libbeat-dir}/debugging.asciidoc[] + +//sets block macro for metrics-in-logs.asciidoc included in next section + +[id="understand-{beatname_lc}-logs"] +[role="xpack"] +== Understand metrics in {beatname_uc} logs + +++++ +Understand logged metrics +++++ + +include::{libbeat-dir}/metrics-in-logs.asciidoc[] \ No newline at end of file diff --git a/metricbeat/helper/dialer/dialer_posix.go b/metricbeat/helper/dialer/dialer_posix.go index 04651ab1ea4..0e66e82b2b7 100644 --- a/metricbeat/helper/dialer/dialer_posix.go +++ b/metricbeat/helper/dialer/dialer_posix.go @@ -20,11 +20,10 @@ package dialer import ( + "errors" "strings" "time" - "github.com/pkg/errors" - "github.com/elastic/elastic-agent-libs/transport" ) diff --git a/metricbeat/helper/dialer/dialer_windows.go b/metricbeat/helper/dialer/dialer_windows.go index ff5fedca908..0ef34666d2a 100644 --- a/metricbeat/helper/dialer/dialer_windows.go +++ b/metricbeat/helper/dialer/dialer_windows.go @@ -20,12 +20,11 @@ package dialer import ( + "errors" "net" "strings" "time" - "github.com/pkg/errors" - winio "github.com/Microsoft/go-winio" "github.com/elastic/beats/v7/libbeat/api/npipe" diff --git a/metricbeat/helper/elastic/elastic.go b/metricbeat/helper/elastic/elastic.go index e6cdea008f8..25fa5835434 100644
--- a/metricbeat/helper/elastic/elastic.go +++ b/metricbeat/helper/elastic/elastic.go @@ -21,8 +21,6 @@ import ( "fmt" "strings" - "github.com/pkg/errors" - "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/elastic-agent-libs/version" @@ -145,7 +143,7 @@ func NewModule(base *mb.BaseModule, xpackEnabledMetricsets []string, optionalXpa XPackEnabled bool `config:"xpack.enabled"` }{} if err := base.UnpackConfig(&config); err != nil { - return nil, errors.Wrapf(err, "could not unpack configuration for module %v", moduleName) + return nil, fmt.Errorf("could not unpack configuration for module %v: %w", moduleName, err) } // No special configuration is needed if xpack.enabled != true @@ -155,7 +153,7 @@ func NewModule(base *mb.BaseModule, xpackEnabledMetricsets []string, optionalXpa var raw mapstr.M if err := base.UnpackConfig(&raw); err != nil { - return nil, errors.Wrapf(err, "could not unpack configuration for module %v", moduleName) + return nil, fmt.Errorf("could not unpack configuration for module %v: %w", moduleName, err) } // Ensure all required metricsets are enabled when xpack.enabled == true, and add any additional which are optional @@ -190,12 +188,12 @@ func NewModule(base *mb.BaseModule, xpackEnabledMetricsets []string, optionalXpa newConfig, err := conf.NewConfigFrom(raw) if err != nil { - return nil, errors.Wrapf(err, "could not create new configuration for module %v", moduleName) + return nil, fmt.Errorf("could not create new configuration for module %v: %w", moduleName, err) } newModule, err := base.WithConfig(*newConfig) if err != nil { - return nil, errors.Wrapf(err, "could not reconfigure module %v", moduleName) + return nil, fmt.Errorf("could not reconfigure module %v: %w", moduleName, err) } logger.Debugf("Configuration for module %v modified because xpack.enabled was set to true", moduleName) diff --git a/metricbeat/helper/http.go b/metricbeat/helper/http.go index fd1ee0431ed..9b8cf792879 100644 --- a/metricbeat/helper/http.go +++ b/metricbeat/helper/http.go @@ -26,8 +26,6 @@ import ( "io/ioutil" "net/http" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/version" "github.com/elastic/beats/v7/metricbeat/helper/dialer" "github.com/elastic/beats/v7/metricbeat/mb" @@ -119,7 +117,7 @@ func (h *HTTP) FetchResponse() (*http.Response, error) { req, err := http.NewRequest(h.method, h.uri, reader) if err != nil { - return nil, errors.Wrap(err, "failed to create HTTP request") + return nil, fmt.Errorf("failed to create HTTP request: %w", err) } req.Header = h.headers if h.hostData.User != "" || h.hostData.Password != "" { @@ -218,7 +216,7 @@ func getAuthHeaderFromToken(path string) (string, error) { b, err := ioutil.ReadFile(path) if err != nil { - return "", errors.Wrap(err, "reading bearer token file") + return "", fmt.Errorf("reading bearer token file: %w", err) } if len(b) != 0 { diff --git a/metricbeat/helper/privileges_windows.go b/metricbeat/helper/privileges_windows.go index 5c25d84a461..e5b11db3573 100644 --- a/metricbeat/helper/privileges_windows.go +++ b/metricbeat/helper/privileges_windows.go @@ -18,13 +18,14 @@ package helper import ( + "fmt" "sync" "syscall" - "github.com/pkg/errors" - "github.com/elastic/gosigar/sys/windows" + "errors" + "github.com/elastic/elastic-agent-libs/logp" ) @@ -55,7 +56,7 @@ func enableSeDebugPrivilege() error { } if err = windows.EnableTokenPrivileges(token, windows.SeDebugPrivilege); err != nil { - return errors.Wrap(err, "EnableTokenPrivileges failed") + return 
fmt.Errorf("EnableTokenPrivileges failed: %w", err) } return nil @@ -74,7 +75,7 @@ func CheckAndEnableSeDebugPrivilege() error { func checkAndEnableSeDebugPrivilege() error { info, err := windows.GetDebugInfo() if err != nil { - return errors.Wrap(err, "GetDebugInfo failed") + return fmt.Errorf("GetDebugInfo failed: %w", err) } logp.Info("Metricbeat process and system info: %v", info) @@ -94,7 +95,7 @@ func checkAndEnableSeDebugPrivilege() error { info, err = windows.GetDebugInfo() if err != nil { - return errors.Wrap(err, "GetDebugInfo failed") + return fmt.Errorf("GetDebugInfo failed: %w", err) } seDebug, found = info.ProcessPrivs[windows.SeDebugPrivilege] @@ -103,7 +104,7 @@ func checkAndEnableSeDebugPrivilege() error { } if !seDebug.Enabled { - return errors.Errorf("Metricbeat failed to enable the "+ + return fmt.Errorf("Metricbeat failed to enable the "+ "SeDebugPrivilege, a Windows privilege that allows it to collect "+ "metrics from other processes. %v", seDebug) } diff --git a/metricbeat/helper/server/tcp/tcp.go b/metricbeat/helper/server/tcp/tcp.go index 021e6a24989..5d7739ce6ec 100644 --- a/metricbeat/helper/server/tcp/tcp.go +++ b/metricbeat/helper/server/tcp/tcp.go @@ -22,8 +22,6 @@ import ( "fmt" "net" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/helper/server" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/elastic-agent-libs/logp" @@ -76,7 +74,7 @@ func NewTcpServer(base mb.BaseMetricSet) (server.Server, error) { func (g *TcpServer) Start() error { listener, err := net.ListenTCP("tcp", g.tcpAddr) if err != nil { - return errors.Wrap(err, "failed to start TCP server") + return fmt.Errorf("failed to start TCP server: %w", err) } g.listener = listener logp.Info("Started listening for TCP on: %s", g.tcpAddr.String()) diff --git a/metricbeat/helper/server/udp/udp.go b/metricbeat/helper/server/udp/udp.go index e254d15ae3a..036f0d236bb 100644 --- a/metricbeat/helper/server/udp/udp.go +++ b/metricbeat/helper/server/udp/udp.go @@ -21,8 +21,6 @@ import ( "fmt" "net" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/helper/server" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/elastic-agent-libs/logp" @@ -78,7 +76,7 @@ func (g *UdpServer) GetHost() string { func (g *UdpServer) Start() error { listener, err := net.ListenUDP("udp", g.udpaddr) if err != nil { - return errors.Wrap(err, "failed to start UDP server") + return fmt.Errorf("failed to start UDP server: %w", err) } logp.Info("Started listening for UDP on: %s", g.udpaddr.String()) diff --git a/metricbeat/helper/socket/netlink.go b/metricbeat/helper/socket/netlink.go index 0037cca64f7..df1230f5421 100644 --- a/metricbeat/helper/socket/netlink.go +++ b/metricbeat/helper/socket/netlink.go @@ -20,11 +20,10 @@ package socket import ( + "fmt" "os" "sync/atomic" - "github.com/pkg/errors" - "github.com/elastic/gosigar/sys/linux" ) @@ -48,7 +47,7 @@ func (session *NetlinkSession) GetSocketList() ([]*linux.InetDiagMsg, error) { req.Header.Seq = atomic.AddUint32(&session.seq, 1) sockets, err := linux.NetlinkInetDiagWithBuf(req, session.readBuffer, nil) if err != nil { - return nil, errors.Wrap(err, "failed requesting socket dump") + return nil, fmt.Errorf("failed requesting socket dump: %w", err) } return sockets, nil } diff --git a/metricbeat/helper/sql/sql.go b/metricbeat/helper/sql/sql.go index 80e59779adc..90ba5d96062 100644 --- a/metricbeat/helper/sql/sql.go +++ b/metricbeat/helper/sql/sql.go @@ -26,8 +26,6 @@ import ( "strings" "time" - "github.com/pkg/errors" - 
"github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -48,14 +46,14 @@ type sqlRow interface { func NewDBClient(driver, uri string, l *logp.Logger) (*DbClient, error) { dbx, err := sql.Open(switchDriverName(driver), uri) if err != nil { - return nil, errors.Wrap(err, "opening connection") + return nil, fmt.Errorf("opening connection: %w", err) } err = dbx.Ping() if err != nil { if closeErr := dbx.Close(); closeErr != nil { - return nil, errors.Wrapf(err, "failed to close with %s, after connection test failed", closeErr) + return nil, fmt.Errorf("failed to close with %s, after connection test failed: %w", closeErr, err) } - return nil, errors.Wrap(err, "testing connection") + return nil, fmt.Errorf("testing connection: %w", err) } return &DbClient{DB: dbx, logger: l}, nil @@ -76,7 +74,7 @@ func (d *DbClient) fetchTableMode(rows sqlRow) ([]mapstr.M, error) { // https://stackoverflow.com/questions/23507531/is-golangs-sql-package-incapable-of-ad-hoc-exploratory-queries/23507765#23507765 cols, err := rows.Columns() if err != nil { - return nil, errors.Wrap(err, "error getting columns") + return nil, fmt.Errorf("error getting columns: %w", err) } for k, v := range cols { @@ -92,7 +90,7 @@ func (d *DbClient) fetchTableMode(rows sqlRow) ([]mapstr.M, error) { for rows.Next() { err = rows.Scan(vals...) if err != nil { - d.logger.Debug(errors.Wrap(err, "error trying to scan rows")) + d.logger.Debug(fmt.Errorf("error trying to scan rows: %w", err)) continue } @@ -107,7 +105,7 @@ func (d *DbClient) fetchTableMode(rows sqlRow) ([]mapstr.M, error) { } if err = rows.Err(); err != nil { - d.logger.Debug(errors.Wrap(err, "error trying to read rows")) + d.logger.Debug(fmt.Errorf("error trying to read rows: %w", err)) } return rr, nil @@ -131,7 +129,7 @@ func (d *DbClient) fetchVariableMode(rows sqlRow) (mapstr.M, error) { var val interface{} err := rows.Scan(&key, &val) if err != nil { - d.logger.Debug(errors.Wrap(err, "error trying to scan rows")) + d.logger.Debug(fmt.Errorf("error trying to scan rows: %w", err)) continue } @@ -140,7 +138,7 @@ func (d *DbClient) fetchVariableMode(rows sqlRow) (mapstr.M, error) { } if err := rows.Err(); err != nil { - d.logger.Debug(errors.Wrap(err, "error trying to read rows")) + d.logger.Debug(fmt.Errorf("error trying to read rows: %w", err)) } r := mapstr.M{} diff --git a/metricbeat/helper/windows/pdh/pdh_query_windows.go b/metricbeat/helper/windows/pdh/pdh_query_windows.go index 44e86637346..3706edbf85c 100644 --- a/metricbeat/helper/windows/pdh/pdh_query_windows.go +++ b/metricbeat/helper/windows/pdh/pdh_query_windows.go @@ -20,6 +20,8 @@ package pdh import ( + "errors" + "fmt" "regexp" "runtime" "strings" @@ -27,8 +29,6 @@ import ( "unsafe" "golang.org/x/sys/windows" - - "github.com/pkg/errors" ) var ( @@ -195,10 +195,10 @@ func (q *Query) GetFormattedCounterValues() (map[string][]CounterValue, error) { func (q *Query) GetCountersAndInstances(objectName string) ([]string, []string, error) { counters, instances, err := PdhEnumObjectItems(objectName) if err != nil { - return nil, nil, errors.Wrapf(err, "Unable to retrieve counter and instance list for %s", objectName) + return nil, nil, fmt.Errorf("Unable to retrieve counter and instance list for %s: %w", objectName, err) } if len(counters) == 0 && len(instances) == 0 { - return nil, nil, errors.Errorf("Unable to retrieve counter and instance list for %s", objectName) + return nil, nil, fmt.Errorf("Unable to retrieve counter and instance list for %s", objectName) } return 
UTF16ToStringArray(counters), UTF16ToStringArray(instances), nil } @@ -333,7 +333,7 @@ func getCounterValue(counter *Counter) CounterValue { counterValue.Measurement = value.Value } default: - counterValue.Err.Error = errors.Errorf("initialization failed: format '%#v' "+ + counterValue.Err.Error = fmt.Errorf("initialization failed: format '%#v' "+ "for instance '%s' is invalid (must be PdhFmtDouble, PdhFmtLarge or PdhFmtLong)", counter.format, counter.instanceName) } diff --git a/metricbeat/internal/sysinit/init.go b/metricbeat/internal/sysinit/init.go index 880063506b9..a14a7709550 100644 --- a/metricbeat/internal/sysinit/init.go +++ b/metricbeat/internal/sysinit/init.go @@ -19,6 +19,7 @@ package sysinit import ( "flag" + "fmt" "sync" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" @@ -39,21 +40,24 @@ type HostFSConfig struct { HostFS string `config:"hostfs"` } -// MetricbeatHostFSConfig +// MetricbeatHostFSConfig carries config information for the hostfs setting type MetricbeatHostFSConfig struct { HostFS string `config:"system.hostfs"` } -// Init either the system or linux module. This will produce different modules depending on if we're running under agent or not. +// InitSystemModule initializes either the system or linux module. This will produce different modules depending on if we're running under agent or not. func InitSystemModule(base mb.BaseModule) (mb.Module, error) { // common code for the base use case of `hostfs` being set at the module-level logger := logp.L() - hostfs, userSet := findConfigValue(base) + hostfs, userSet, err := findConfigValue(base) + if err != nil { + return nil, fmt.Errorf("error fetching config value: %w", err) + } if fleetmode.Enabled() { logger.Infof("initializing HostFS values under agent: %s", hostfs) return fleetInit(base, hostfs, userSet) } - return metricbeatInit(base, hostfs, userSet) + return metricbeatInit(base, hostfs) } func fleetInit(base mb.BaseModule, modulepath string, moduleSet bool) (mb.Module, error) { @@ -71,12 +75,11 @@ func fleetInit(base mb.BaseModule, modulepath string, moduleSet bool) (mb.Module } // Deal with the legacy configs available to metricbeat -func metricbeatInit(base mb.BaseModule, modulePath string, moduleSet bool) (mb.Module, error) { +func metricbeatInit(base mb.BaseModule, modulePath string) (mb.Module, error) { var hostfs = modulePath var userSet bool // allow the CLI to override other settings if hostfsCLI != nil && *hostfsCLI != "" { - cfgwarn.Deprecate("8.0.0", "The --system.hostfs flag will be removed in the future and replaced by a config value.") hostfs = *hostfsCLI userSet = true } @@ -91,22 +94,29 @@ func metricbeatInit(base mb.BaseModule, modulePath string, moduleSet bool) (mb.M // A user can supply either `system.hostfs` or `hostfs`. // In addition, we will probably want to change Integration Config values to `hostfs` as well. // We need to figure out which one we got, if any. -func findConfigValue(base mb.BaseModule) (string, bool) { +// Returns false if no config value was set +func findConfigValue(base mb.BaseModule) (string, bool, error) { partialConfig := HostFSConfig{} - base.UnpackConfig(&partialConfig) + err := base.UnpackConfig(&partialConfig) + if err != nil { + return "", false, fmt.Errorf("error unpacking hostfs config: %w", err) + } // if the newer value is set, just use that.
if partialConfig.HostFS != "" { - return partialConfig.HostFS, true + return partialConfig.HostFS, true, nil } legacyConfig := MetricbeatHostFSConfig{} - base.UnpackConfig(&legacyConfig) + err = base.UnpackConfig(&legacyConfig) + if err != nil { + return "", false, fmt.Errorf("error unpacking legacy config: %w", err) + } if legacyConfig.HostFS != "" { cfgwarn.Deprecate("8.0.0", "The system.hostfs config value will be removed, use `hostfs` from within the module config.") // Only fallback to this if the user didn't set anything else - return legacyConfig.HostFS, true + return legacyConfig.HostFS, true, nil } - return "/", false + return "/", false, nil } diff --git a/metricbeat/mb/builders.go b/metricbeat/mb/builders.go index 57c3707b728..269c194063c 100644 --- a/metricbeat/mb/builders.go +++ b/metricbeat/mb/builders.go @@ -18,12 +18,12 @@ package mb import ( + "errors" "fmt" "strings" "github.com/gofrs/uuid" "github.com/joeshaw/multierror" - "github.com/pkg/errors" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" @@ -91,7 +91,7 @@ func newBaseModuleFromConfig(rawConfig *conf.C) (BaseModule, error) { err = mustNotContainDuplicates(baseModule.config.Hosts) if err != nil { - return baseModule, errors.Wrapf(err, "invalid hosts for module '%s'", baseModule.name) + return baseModule, fmt.Errorf("invalid hosts for module '%s': %w", baseModule.name, err) } return baseModule, nil @@ -129,8 +129,8 @@ func initMetricSets(r *Register, m Module) ([]MetricSet, error) { if registration.HostParser != nil { bm.hostData, err = registration.HostParser(bm.Module(), bm.host) if err != nil { - errs = append(errs, errors.Wrapf(err, "host parsing failed for %v-%v", - bm.Module().Name(), bm.Name())) + errs = append(errs, fmt.Errorf("host parsing failed for %v-%v: %w", + bm.Module().Name(), bm.Name(), err)) continue } bm.host = bm.hostData.Host @@ -168,7 +168,7 @@ func newBaseMetricSets(r *Register, m Module) ([]BaseMetricSet, error) { var err error metricSetNames, err = r.DefaultMetricSets(m.Name()) if err != nil { - return nil, errors.Errorf("no metricsets configured for module '%s'", m.Name()) + return nil, fmt.Errorf("no metricsets configured for module '%s'", m.Name()) } } @@ -178,7 +178,7 @@ func newBaseMetricSets(r *Register, m Module) ([]BaseMetricSet, error) { for _, host := range hosts { id, err := uuid.NewV4() if err != nil { - return nil, errors.Wrap(err, "failed to generate ID for metricset") + return nil, fmt.Errorf("failed to generate ID for metricset: %w", err) } msID := id.String() metrics := monitoring.NewRegistry() diff --git a/metricbeat/mb/lightmetricset.go b/metricbeat/mb/lightmetricset.go index 544c1fb7d4f..cb1d44e9600 100644 --- a/metricbeat/mb/lightmetricset.go +++ b/metricbeat/mb/lightmetricset.go @@ -18,7 +18,7 @@ package mb import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/libbeat/processors" conf "github.com/elastic/elastic-agent-libs/config" @@ -43,10 +43,10 @@ type LightMetricSet struct { func (m *LightMetricSet) Registration(r *Register) (MetricSetRegistration, error) { registration, err := r.metricSetRegistration(m.Input.Module, m.Input.MetricSet) if err != nil { - return registration, errors.Wrapf(err, - "failed to start light metricset '%s/%s' using '%s/%s' metricset as input", + return registration, fmt.Errorf( + "failed to start light metricset '%s/%s' using '%s/%s' metricset as input: %w", m.Module, m.Name, - m.Input.Module, m.Input.MetricSet) + m.Input.Module, m.Input.MetricSet, err) } originalFactory := 
registration.Factory @@ -69,7 +69,7 @@ func (m *LightMetricSet) Registration(r *Register) (MetricSetRegistration, error base.name = m.Name baseModule, err := m.baseModule(base.module) if err != nil { - return nil, errors.Wrapf(err, "failed to create base module for light module '%s', using base module '%s'", m.Module, base.module.Name()) + return nil, fmt.Errorf("failed to create base module for light module '%s', using base module '%s': %w", m.Module, base.module.Name(), err) } base.module = baseModule @@ -79,7 +79,7 @@ func (m *LightMetricSet) Registration(r *Register) (MetricSetRegistration, error if moduleFactory != nil { module, err := moduleFactory(*baseModule) if err != nil { - return nil, errors.Wrapf(err, "module factory for module '%s' failed while creating light metricset '%s/%s'", m.Input.Module, m.Module, m.Name) + return nil, fmt.Errorf("module factory for module '%s' failed while creating light metricset '%s/%s': %w", m.Input.Module, m.Module, m.Name, err) } base.module = module } @@ -88,7 +88,7 @@ func (m *LightMetricSet) Registration(r *Register) (MetricSetRegistration, error if originalHostParser != nil { base.hostData, err = originalHostParser(base.module, base.host) if err != nil { - return nil, errors.Wrapf(err, "host parser failed on light metricset factory for '%s/%s'", m.Module, m.Name) + return nil, fmt.Errorf("host parser failed on light metricset factory for '%s/%s': %w", m.Module, m.Name, err) } base.host = base.hostData.Host } @@ -105,18 +105,18 @@ func (m *LightMetricSet) baseModule(from Module) (*BaseModule, error) { // Initialize config using input defaults as raw config rawConfig, err := conf.NewConfigFrom(m.Input.Defaults) if err != nil { - return nil, errors.Wrap(err, "invalid input defaults") + return nil, fmt.Errorf("invalid input defaults: %w", err) } // Copy values from user configuration if err = from.UnpackConfig(rawConfig); err != nil { - return nil, errors.Wrap(err, "failed to copy values from user configuration") + return nil, fmt.Errorf("failed to copy values from user configuration: %w", err) } // Create the base module baseModule, err := newBaseModuleFromConfig(rawConfig) if err != nil { - return nil, errors.Wrap(err, "failed to create base module") + return nil, fmt.Errorf("failed to create base module: %w", err) } baseModule.name = m.Module diff --git a/metricbeat/mb/lightmodules.go b/metricbeat/mb/lightmodules.go index 2b60d882c29..50293d7f60b 100644 --- a/metricbeat/mb/lightmodules.go +++ b/metricbeat/mb/lightmodules.go @@ -24,8 +24,6 @@ import ( "path/filepath" "strings" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/processors" "github.com/elastic/elastic-agent-libs/logp" @@ -74,7 +72,7 @@ func (s *LightModulesSource) HasModule(moduleName string) bool { func (s *LightModulesSource) DefaultMetricSets(r *Register, moduleName string) ([]string, error) { module, err := s.loadModule(r, moduleName) if err != nil { - return nil, errors.Wrapf(err, "getting default metricsets for module '%s'", moduleName) + return nil, fmt.Errorf("getting default metricsets for module '%s': %w", moduleName, err) } var metricsets []string for _, ms := range module.MetricSets { @@ -89,7 +87,7 @@ func (s *LightModulesSource) DefaultMetricSets(r *Register, moduleName string) ( func (s *LightModulesSource) MetricSets(r *Register, moduleName string) ([]string, error) { module, err := s.loadModule(r, moduleName) if err != nil { - return nil, errors.Wrapf(err, "getting metricsets for module '%s'", moduleName) 
+ return nil, fmt.Errorf("getting metricsets for module '%s': %w", moduleName, err) } metricsets := make([]string, 0, len(module.MetricSets)) for _, ms := range module.MetricSets { @@ -123,7 +121,7 @@ func (s *LightModulesSource) HasMetricSet(moduleName, metricSetName string) bool func (s *LightModulesSource) MetricSetRegistration(register *Register, moduleName, metricSetName string) (MetricSetRegistration, error) { lightModule, err := s.loadModule(register, moduleName) if err != nil { - return MetricSetRegistration{}, errors.Wrapf(err, "loading module '%s'", moduleName) + return MetricSetRegistration{}, fmt.Errorf("loading module '%s': %w", moduleName, err) } ms, found := lightModule.MetricSets[metricSetName] @@ -163,7 +161,7 @@ type lightModuleConfig struct { func (s *LightModulesSource) ProcessorsForMetricSet(r *Register, moduleName string, metricSetName string) (*processors.Processors, error) { module, err := s.loadModule(r, moduleName) if err != nil { - return nil, errors.Wrapf(err, "reading processors for metricset '%s' in module '%s'", metricSetName, moduleName) + return nil, fmt.Errorf("reading processors for metricset '%s' in module '%s': %w", metricSetName, moduleName, err) } metricSet, ok := module.MetricSets[metricSetName] if !ok { @@ -186,12 +184,12 @@ func (s *LightModulesSource) loadModule(register *Register, moduleName string) ( moduleConfig, err := s.loadModuleConfig(modulePath) if err != nil { - return nil, errors.Wrapf(err, "loading light module '%s' definition", moduleName) + return nil, fmt.Errorf("loading light module '%s' definition: %w", moduleName, err) } metricSets, err := s.loadMetricSets(register, filepath.Dir(modulePath), moduleConfig.Name, moduleConfig.MetricSets) if err != nil { - return nil, errors.Wrapf(err, "loading metric sets for light module '%s'", moduleName) + return nil, fmt.Errorf("loading metric sets for light module '%s': %w", moduleName, err) } return &LightModule{Name: moduleName, MetricSets: metricSets}, nil @@ -210,12 +208,12 @@ func (s *LightModulesSource) findModulePath(moduleName string) (string, bool) { func (s *LightModulesSource) loadModuleConfig(modulePath string) (*lightModuleConfig, error) { config, err := common.LoadFile(modulePath) if err != nil { - return nil, errors.Wrapf(err, "loading module configuration from '%s'", modulePath) + return nil, fmt.Errorf("loading module configuration from '%s': %w", modulePath, err) } var moduleConfig lightModuleConfig if err = config.Unpack(&moduleConfig); err != nil { - return nil, errors.Wrapf(err, "parsing light module definition from '%s'", modulePath) + return nil, fmt.Errorf("parsing light module definition from '%s': %w", modulePath, err) } return &moduleConfig, nil } @@ -233,7 +231,7 @@ func (s *LightModulesSource) loadMetricSets(register *Register, moduleDirPath, m metricSetConfig, err := s.loadMetricSetConfig(manifestPath) if err != nil { - return nil, errors.Wrapf(err, "loading light metricset '%s'", metricSet) + return nil, fmt.Errorf("loading light metricset '%s': %w", metricSet, err) } metricSetConfig.Name = metricSet metricSetConfig.Module = moduleName @@ -246,11 +244,11 @@ func (s *LightModulesSource) loadMetricSets(register *Register, moduleDirPath, m func (s *LightModulesSource) loadMetricSetConfig(manifestPath string) (ms LightMetricSet, err error) { config, err := common.LoadFile(manifestPath) if err != nil { - return ms, errors.Wrapf(err, "loading metricset manifest from '%s'", manifestPath) + return ms, fmt.Errorf("loading metricset manifest from '%s': %w", manifestPath, 
err) } if err := config.Unpack(&ms); err != nil { - return ms, errors.Wrapf(err, "parsing metricset manifest from '%s'", manifestPath) + return ms, fmt.Errorf("parsing metricset manifest from '%s': %w", manifestPath, err) } return } @@ -264,7 +262,7 @@ func (s *LightModulesSource) moduleNames() ([]string, error) { } files, err := ioutil.ReadDir(dir) if err != nil { - return nil, errors.Wrapf(err, "listing modules on path '%s'", dir) + return nil, fmt.Errorf("listing modules on path '%s': %w", dir, err) } for _, f := range files { if !f.IsDir() { diff --git a/metricbeat/mb/mb.go b/metricbeat/mb/mb.go index ea9cf0ac6d9..06b85662838 100644 --- a/metricbeat/mb/mb.go +++ b/metricbeat/mb/mb.go @@ -27,8 +27,6 @@ import ( "net/url" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/helper/dialer" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" @@ -107,7 +105,7 @@ func (m *BaseModule) WithConfig(config conf.C) (*BaseModule, error) { Module string `config:"module"` } if err := config.Unpack(&chkConfig); err != nil { - return nil, errors.Wrap(err, "error parsing new module configuration") + return nil, fmt.Errorf("error parsing new module configuration: %w", err) } // Don't allow module name change @@ -116,7 +114,7 @@ func (m *BaseModule) WithConfig(config conf.C) (*BaseModule, error) { } if err := config.SetString("module", -1, m.name); err != nil { - return nil, errors.Wrap(err, "unable to set existing module name in new configuration") + return nil, fmt.Errorf("unable to set existing module name in new configuration: %w", err) } newBM := &BaseModule{ @@ -125,7 +123,7 @@ func (m *BaseModule) WithConfig(config conf.C) (*BaseModule, error) { } if err := config.Unpack(&newBM.config); err != nil { - return nil, errors.Wrap(err, "error parsing new module configuration") + return nil, fmt.Errorf("error parsing new module configuration: %w", err) } return newBM, nil diff --git a/metricbeat/mb/module/configuration.go b/metricbeat/mb/module/configuration.go index eea33c1f56f..1e69d6094c4 100644 --- a/metricbeat/mb/module/configuration.go +++ b/metricbeat/mb/module/configuration.go @@ -18,7 +18,7 @@ package module import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/metricbeat/mb" @@ -44,18 +44,18 @@ func ConfiguredModules(modulesData []*conf.C, configModulesData *conf.C, moduleO modulesManager, err := cfgfile.NewGlobManager(config.Path, ".yml", ".disabled") if err != nil { - return nil, errors.Wrap(err, "initialization error") + return nil, fmt.Errorf("initialization error: %w", err) } for _, file := range modulesManager.ListEnabled() { confs, err := cfgfile.LoadList(file.Path) if err != nil { - return nil, errors.Wrap(err, "error loading config files") + return nil, fmt.Errorf("error loading config files: %w", err) } for _, conf := range confs { m, err := NewWrapper(conf, mb.Registry, moduleOptions...) 
if err != nil { - return nil, errors.Wrap(err, "module initialization error") + return nil, fmt.Errorf("module initialization error: %w", err) } modules = append(modules, m) } diff --git a/metricbeat/mb/module/connector.go b/metricbeat/mb/module/connector.go index 02e64144092..6e6b0ca6113 100644 --- a/metricbeat/mb/module/connector.go +++ b/metricbeat/mb/module/connector.go @@ -18,7 +18,7 @@ package module import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common/fmtstr" @@ -80,8 +80,8 @@ func NewConnector( func (c *Connector) UseMetricSetProcessors(r metricSetRegister, moduleName, metricSetName string) error { metricSetProcessors, err := r.ProcessorsForMetricSet(moduleName, metricSetName) if err != nil { - return errors.Wrapf(err, "reading metricset processors failed (module: %s, metricset: %s)", - moduleName, metricSetName) + return fmt.Errorf("reading metricset processors failed (module: %s, metricset: %s): %w", + moduleName, metricSetName, err) } if metricSetProcessors == nil || len(metricSetProcessors.List) == 0 { diff --git a/metricbeat/mb/parse/hostparsers.go b/metricbeat/mb/parse/hostparsers.go index e499de9a1d8..4fca7e99aa9 100644 --- a/metricbeat/mb/parse/hostparsers.go +++ b/metricbeat/mb/parse/hostparsers.go @@ -18,7 +18,7 @@ package parse import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" ) @@ -35,7 +35,7 @@ func PassThruHostParser(module mb.Module, host string) (mb.HostData, error) { // value is empty and returns an error if not. func EmptyHostParser(module mb.Module, host string) (mb.HostData, error) { if host != "" { - return mb.HostData{}, errors.Errorf("hosts must be empty for %v", module.Name()) + return mb.HostData{}, fmt.Errorf("hosts must be empty for %v", module.Name()) } return mb.HostData{}, nil diff --git a/metricbeat/mb/parse/url.go b/metricbeat/mb/parse/url.go index c0b23f421d7..5dae25816e9 100644 --- a/metricbeat/mb/parse/url.go +++ b/metricbeat/mb/parse/url.go @@ -26,8 +26,6 @@ import ( "github.com/elastic/beats/v7/metricbeat/helper/dialer" "github.com/elastic/beats/v7/metricbeat/mb" - - "github.com/pkg/errors" ) // URLHostParserBuilder builds a tailored HostParser for used with host strings @@ -55,7 +53,7 @@ func (b URLHostParserBuilder) Build() mb.HostParser { if ok { queryMap, ok := query.(map[string]interface{}) if !ok { - return mb.HostData{}, errors.Errorf("'query' config for module %v is not a map", module.Name()) + return mb.HostData{}, fmt.Errorf("'query' config for module %v is not a map", module.Name()) } b.QueryParams = mb.QueryParams(queryMap).String() @@ -66,7 +64,7 @@ func (b URLHostParserBuilder) Build() mb.HostParser { if ok { user, ok = t.(string) if !ok { - return mb.HostData{}, errors.Errorf("'username' config for module %v is not a string", module.Name()) + return mb.HostData{}, fmt.Errorf("'username' config for module %v is not a string", module.Name()) } } else { user = b.DefaultUsername @@ -75,7 +73,7 @@ func (b URLHostParserBuilder) Build() mb.HostParser { if ok { pass, ok = t.(string) if !ok { - return mb.HostData{}, errors.Errorf("'password' config for module %v is not a string", module.Name()) + return mb.HostData{}, fmt.Errorf("'password' config for module %v is not a string", module.Name()) } } else { pass = b.DefaultPassword @@ -84,7 +82,7 @@ func (b URLHostParserBuilder) Build() mb.HostParser { if ok { path, ok = t.(string) if !ok { - return mb.HostData{}, errors.Errorf("'%v' config for module %v is not a string", 
b.PathConfigKey, module.Name()) + return mb.HostData{}, fmt.Errorf("'%v' config for module %v is not a string", b.PathConfigKey, module.Name()) } } else { path = b.DefaultPath @@ -96,7 +94,7 @@ func (b URLHostParserBuilder) Build() mb.HostParser { if ok { basePath, ok = t.(string) if !ok { - return mb.HostData{}, errors.Errorf("'basepath' config for module %v is not a string", module.Name()) + return mb.HostData{}, fmt.Errorf("'basepath' config for module %v is not a string", module.Name()) } } diff --git a/metricbeat/mb/registry.go b/metricbeat/mb/registry.go index d006c52082d..307e473101c 100644 --- a/metricbeat/mb/registry.go +++ b/metricbeat/mb/registry.go @@ -23,8 +23,6 @@ import ( "strings" "sync" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/processors" "github.com/elastic/elastic-agent-libs/logp" ) @@ -267,7 +265,7 @@ func (r *Register) metricSetRegistration(module, name string) (MetricSetRegistra if source := r.secondarySource; source != nil && source.HasMetricSet(module, name) { registration, err := source.MetricSetRegistration(r, module, name) if err != nil { - return MetricSetRegistration{}, errors.Wrapf(err, "failed to obtain registration for non-registered metricset '%s/%s'", module, name) + return MetricSetRegistration{}, fmt.Errorf("failed to obtain registration for non-registered metricset '%s/%s': %w", module, name, err) } return registration, nil } diff --git a/metricbeat/mb/testing/testdata.go b/metricbeat/mb/testing/testdata.go index dcc16f00da4..60455c1d635 100644 --- a/metricbeat/mb/testing/testdata.go +++ b/metricbeat/mb/testing/testdata.go @@ -19,6 +19,7 @@ package testing import ( "encoding/json" + "fmt" "io/ioutil" "net/http" "net/http/httptest" @@ -27,8 +28,6 @@ import ( "strings" "testing" - "github.com/pkg/errors" - "github.com/mitchellh/hashstructure" "gopkg.in/yaml.v2" @@ -446,7 +445,7 @@ func documentedFieldCheck(foundKeys mapstr.M, knownKeys map[string]interface{}, } } - return errors.Errorf("field missing '%s'", foundKey) + return fmt.Errorf("field missing '%s'", foundKey) } } diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index c5e39f47220..2a184fdb0bf 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -1010,10 +1010,10 @@ metricbeat.modules: # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. +# If this option is not defined, the hostname is used. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in a dedicated field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] @@ -1025,7 +1025,7 @@ metricbeat.modules: # env: staging # If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields +# fields in the output document instead of being grouped under a `fields` # sub-dictionary. Default is false. #fields_under_root: false @@ -1037,7 +1037,7 @@ metricbeat.modules: #queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs - # bulk_max_size) to the output, the moment the output is ready to server + # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events.
#mem: # Max number of events the queue can buffer. @@ -1089,7 +1089,7 @@ metricbeat.modules: # length of its retry interval each time, up to this maximum. #max_retry_interval: 30s -# Sets the maximum number of CPUs that can be executing simultaneously. The +# Sets the maximum number of CPUs that can execute simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: @@ -1210,7 +1210,7 @@ metricbeat.modules: # ignore_missing: false # fail_on_error: true # -# The following example copies the value of message to message_copied +# The following example copies the value of the message field to message_copied # #processors: # - copy_fields: @@ -1220,7 +1220,7 @@ metricbeat.modules: # fail_on_error: true # ignore_missing: false # -# The following example truncates the value of message to 1024 bytes +# The following example truncates the value of the message field to 1024 bytes # #processors: # - truncate_fields: @@ -1317,7 +1317,7 @@ output.elasticsearch: # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. #index: "metricbeat-%{[agent.version]}" - # Optional ingest pipeline. By default no pipeline will be used. + # Optional ingest pipeline. By default, no pipeline will be used. #pipeline: "" # Optional HTTP path @@ -2048,14 +2048,14 @@ output.elasticsearch: # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag or the `setup` command. +# options here or by using the `-setup` CLI flag or the `setup` command. #setup.dashboards.enabled: false # The directory from where to read the dashboards. The default is the `kibana` # folder in the home path. #setup.dashboards.directory: ${path.home}/kibana -# The URL from where to download the dashboards archive. It is used instead of +# The URL from where to download the dashboard archive. It is used instead of # the directory if it has a value. #setup.dashboards.url: @@ -2152,7 +2152,7 @@ setup.template.settings: # Configure index lifecycle management (ILM) to manage the backing indices # of your data streams. -# Enable ILM support. Valid values are true, false. +# Enable ILM support. Valid values are true or false. #setup.ilm.enabled: true # Set the lifecycle policy name. The default policy name is @@ -2307,25 +2307,25 @@ logging.files: # The name of the files where the logs are written to. #name: metricbeat - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated + # Configure log file size limit. If the limit is reached, the log file will be + # automatically rotated. #rotateeverybytes: 10485760 # = 10MB - # Number of rotated log files to keep. Oldest files will be deleted first. + # Number of rotated log files to keep. The oldest files will be deleted first. #keepfiles: 7 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. #permissions: 0600 - # Enable log file rotation on time intervals in addition to size-based rotation. + # Enable log file rotation on time intervals in addition to the size-based rotation. # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h # are boundary-aligned with minutes, hours, days, weeks, months, and years as # reported by the local system clock. All other intervals are calculated from the # Unix epoch.
Defaults to disabled. #interval: 0 - # Rotate existing logs on startup rather than appending to the existing + # Rotate existing logs on startup rather than appending them to the existing # file. Defaults to true. # rotateonstartup: true @@ -2353,7 +2353,7 @@ logging.files: # Array of hosts to connect to. # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # In case you specify an additional path, the scheme is required: http://localhost:9200/path # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 #hosts: ["localhost:9200"] @@ -2400,7 +2400,7 @@ logging.files: # Elasticsearch after a network error. The default is 60s. #backoff.max: 60s - # Configure HTTP request timeout before failing an request to Elasticsearch. + # Configure HTTP request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -2497,15 +2497,15 @@ logging.files: # =============================== HTTP Endpoint ================================ -# Each beat can expose internal metrics through a HTTP endpoint. For security +# Each beat can expose internal metrics through an HTTP endpoint. For security # reasons the endpoint is disabled by default. This feature is currently experimental. -# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output # append ?pretty to the URL. # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe. # When using IP addresses, it is recommended to only use localhost. #http.host: localhost @@ -2515,7 +2515,7 @@ logging.files: # Define which user should be owning the named pipe. #http.named_pipe.user: -# Define which the permissions that should be applied to the named pipe, use the Security +# Define which permissions should be applied to the named pipe; use the Security # Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with # `http.user`. #http.named_pipe.security_descriptor: diff --git a/metricbeat/metricbeat.yml b/metricbeat/metricbeat.yml index df17ac01dbe..8b974c6c020 100644 --- a/metricbeat/metricbeat.yml +++ b/metricbeat/metricbeat.yml @@ -33,7 +33,7 @@ setup.template.settings: # all the transactions sent by a single shipper in the web interface. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in a dedicated field with each # transaction published. #tags: ["service-X", "web-tier"] @@ -48,8 +48,8 @@ setup.template.settings: # options here or by using the `setup` command. #setup.dashboards.enabled: false -# The URL from where to download the dashboards archive. By default this URL -# has a value which is computed based on the Beat name and version. For released +# The URL from where to download the dashboard archive. By default, this URL +# has a value that is computed based on the Beat name and version. For released # versions, this URL points to the dashboard archive on the artifacts.elastic.co # website. #setup.dashboards.url: @@ -134,7 +134,7 @@ processors: #logging.level: debug # At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"].
Examples of other selectors are "beat", +# To enable all selectors, use ["*"]. Examples of other selectors are "beat", # "publisher", "service". #logging.selectors: ["*"] @@ -152,7 +152,7 @@ processors: #monitoring.cluster_uuid: # Uncomment to send the metrics to Elasticsearch. Most settings from the -# Elasticsearch output are accepted here as well. +# Elasticsearch output are also accepted here. # Note that the settings should point to your Elasticsearch *monitoring* cluster. # Any setting that is not set is automatically inherited from the Elasticsearch # output configuration, so if you have the Elasticsearch output configured such diff --git a/metricbeat/module/aerospike/aerospike.go b/metricbeat/module/aerospike/aerospike.go index 8c9e9078f69..65cfabf6239 100644 --- a/metricbeat/module/aerospike/aerospike.go +++ b/metricbeat/module/aerospike/aerospike.go @@ -18,22 +18,21 @@ package aerospike import ( + "fmt" "strconv" "strings" - "github.com/pkg/errors" - as "github.com/aerospike/aerospike-client-go" ) func ParseHost(host string) (*as.Host, error) { pieces := strings.Split(host, ":") if len(pieces) != 2 { - return nil, errors.Errorf("Can't parse host %s", host) + return nil, fmt.Errorf("Can't parse host %s", host) } port, err := strconv.Atoi(pieces[1]) if err != nil { - return nil, errors.Wrapf(err, "Can't parse port") + return nil, fmt.Errorf("Can't parse port: %w", err) } return as.NewHost(pieces[0], port), nil } diff --git a/metricbeat/module/aerospike/namespace/namespace.go b/metricbeat/module/aerospike/namespace/namespace.go index 1f421a1cc7b..97beb050ce1 100644 --- a/metricbeat/module/aerospike/namespace/namespace.go +++ b/metricbeat/module/aerospike/namespace/namespace.go @@ -18,10 +18,10 @@ package namespace import ( + "fmt" "strings" as "github.com/aerospike/aerospike-client-go" - "github.com/pkg/errors" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/aerospike" @@ -57,7 +57,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { host, err := aerospike.ParseHost(base.Host()) if err != nil { - return nil, errors.Wrap(err, "Invalid host format, expected hostname:port") + return nil, fmt.Errorf("Invalid host format, expected hostname:port: %w", err) } return &MetricSet{ @@ -71,7 +71,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // of an error set the Error field of mb.Event or simply call report.Error().
func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { if err := m.connect(); err != nil { - return errors.Wrap(err, "error connecting to Aerospike") + return fmt.Errorf("error connecting to Aerospike: %w", err) } for _, node := range m.client.GetNodes() { diff --git a/metricbeat/module/apache/status/status.go b/metricbeat/module/apache/status/status.go index 012e08b91ac..472e4fcb3d5 100644 --- a/metricbeat/module/apache/status/status.go +++ b/metricbeat/module/apache/status/status.go @@ -19,7 +19,7 @@ package status import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/libbeat/common/fleetmode" "github.com/elastic/beats/v7/metricbeat/helper" @@ -87,7 +87,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { scanner, err := m.http.FetchScanner() if err != nil { - return errors.Wrap(err, "error fetching data") + return fmt.Errorf("error fetching data: %w", err) } data, _ := eventMapping(scanner, m.Host()) diff --git a/metricbeat/module/beat/state/data.go b/metricbeat/module/beat/state/data.go index 919da1b3f3e..b555c84bd40 100644 --- a/metricbeat/module/beat/state/data.go +++ b/metricbeat/module/beat/state/data.go @@ -19,12 +19,11 @@ package state import ( "encoding/json" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper/elastic" "github.com/elastic/elastic-agent-libs/mapstr" - "github.com/pkg/errors" - s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" "github.com/elastic/beats/v7/metricbeat/mb" @@ -75,7 +74,7 @@ func eventMapping(r mb.ReporterV2, info beat.Info, content []byte, isXpack bool) var data map[string]interface{} err := json.Unmarshal(content, &data) if err != nil { - return errors.Wrap(err, "failure parsing Beat's State API response") + return fmt.Errorf("failure parsing Beat's State API response: %w", err) } event.MetricSetFields, _ = schema.Apply(data) diff --git a/metricbeat/module/beat/stats/data.go b/metricbeat/module/beat/stats/data.go index 24a430aff59..0ad05d3aad7 100644 --- a/metricbeat/module/beat/stats/data.go +++ b/metricbeat/module/beat/stats/data.go @@ -19,12 +19,11 @@ package stats import ( "encoding/json" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper/elastic" "github.com/elastic/elastic-agent-libs/mapstr" - "github.com/pkg/errors" - s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" "github.com/elastic/beats/v7/metricbeat/mb" @@ -129,7 +128,7 @@ func eventMapping(r mb.ReporterV2, info beat.Info, clusterUUID string, content [ var data map[string]interface{} err := json.Unmarshal(content, &data) if err != nil { - return errors.Wrap(err, "failure parsing Beat's Stats API response") + return fmt.Errorf("failure parsing Beat's Stats API response: %w", err) } event.MetricSetFields, _ = schema.Apply(data) diff --git a/metricbeat/module/ceph/cluster_disk/data.go b/metricbeat/module/ceph/cluster_disk/data.go index f89d3017575..07a014d7ddb 100644 --- a/metricbeat/module/ceph/cluster_disk/data.go +++ b/metricbeat/module/ceph/cluster_disk/data.go @@ -19,8 +19,7 @@ package cluster_disk import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -44,7 +43,7 @@ func eventMapping(content []byte) (mapstr.M, error) { var d DfRequest err := json.Unmarshal(content, &d) if err != nil { - return nil, errors.Wrap(err, "could not get DFRequest data") + return nil, fmt.Errorf("could not get DFRequest 
data: %w", err) } return mapstr.M{ diff --git a/metricbeat/module/ceph/cluster_health/cluster_health.go b/metricbeat/module/ceph/cluster_health/cluster_health.go index 887cb9a5de2..bc3971721e2 100644 --- a/metricbeat/module/ceph/cluster_health/cluster_health.go +++ b/metricbeat/module/ceph/cluster_health/cluster_health.go @@ -18,7 +18,7 @@ package cluster_health import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -70,12 +70,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { content, err := m.HTTP.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } events, err := eventMapping(content) if err != nil { - return errors.Wrap(err, "error in mapping") + return fmt.Errorf("error in mapping: %w", err) } reporter.Event(mb.Event{MetricSetFields: events}) diff --git a/metricbeat/module/ceph/cluster_health/data.go b/metricbeat/module/ceph/cluster_health/data.go index 6144b146825..3642a315297 100644 --- a/metricbeat/module/ceph/cluster_health/data.go +++ b/metricbeat/module/ceph/cluster_health/data.go @@ -19,8 +19,7 @@ package cluster_health import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -48,7 +47,7 @@ func eventMapping(content []byte) (mapstr.M, error) { var d HealthRequest err := json.Unmarshal(content, &d) if err != nil { - return nil, errors.Wrap(err, "error getting HealthRequest data") + return nil, fmt.Errorf("error getting HealthRequest data: %w", err) } return mapstr.M{ diff --git a/metricbeat/module/ceph/cluster_status/cluster_status.go b/metricbeat/module/ceph/cluster_status/cluster_status.go index 69c11a084f3..786bde0c9a5 100644 --- a/metricbeat/module/ceph/cluster_status/cluster_status.go +++ b/metricbeat/module/ceph/cluster_status/cluster_status.go @@ -18,7 +18,7 @@ package cluster_status import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -70,12 +70,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { content, err := m.HTTP.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } events, err := eventsMapping(content) if err != nil { - return errors.Wrap(err, "error in mapping") + return fmt.Errorf("error in mapping: %w", err) } for _, event := range events { diff --git a/metricbeat/module/ceph/cluster_status/data.go b/metricbeat/module/ceph/cluster_status/data.go index 1b5f3757ea2..c6b4e0850d6 100644 --- a/metricbeat/module/ceph/cluster_status/data.go +++ b/metricbeat/module/ceph/cluster_status/data.go @@ -19,8 +19,7 @@ package cluster_status import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -88,7 +87,7 @@ func eventsMapping(content []byte) ([]mapstr.M, error) { var d HealthRequest err := json.Unmarshal(content, &d) if err != nil { - return nil, errors.Wrap(err, "error getting HealthRequest data") + return nil, fmt.Errorf("error getting HealthRequest data: %w", err) } //osd map info diff --git a/metricbeat/module/ceph/mgr/event_mapping.go b/metricbeat/module/ceph/mgr/event_mapping.go index ee173cea3d5..3a71b47b9ec 100644 --- a/metricbeat/module/ceph/mgr/event_mapping.go +++ b/metricbeat/module/ceph/mgr/event_mapping.go @@ 
-21,7 +21,7 @@ import ( "encoding/json" "fmt" - "github.com/pkg/errors" + "errors" ) // Request stores either failed or finished command result. @@ -43,7 +43,7 @@ func UnmarshalResponse(content []byte, response interface{}) error { var request Request err := json.Unmarshal(content, &request) if err != nil { - return errors.Wrap(err, "could not get request data") + return fmt.Errorf("could not get request data: %w", err) } if request.HasFailed { @@ -59,7 +59,7 @@ func UnmarshalResponse(content []byte, response interface{}) error { err = json.Unmarshal([]byte(request.Finished[0].Outb), response) if err != nil { - return errors.Wrap(err, "could not get response data") + return fmt.Errorf("could not get response data: %w", err) } return nil } diff --git a/metricbeat/module/ceph/mgr_cluster_disk/data.go b/metricbeat/module/ceph/mgr_cluster_disk/data.go index e2e808cbc08..4f41ba05d79 100644 --- a/metricbeat/module/ceph/mgr_cluster_disk/data.go +++ b/metricbeat/module/ceph/mgr_cluster_disk/data.go @@ -18,7 +18,7 @@ package mgr_cluster_disk import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/module/ceph/mgr" "github.com/elastic/elastic-agent-libs/mapstr" @@ -36,7 +36,7 @@ func eventMapping(content []byte) (mapstr.M, error) { var response DfResponse err := mgr.UnmarshalResponse(content, &response) if err != nil { - return nil, errors.Wrap(err, "could not get response data") + return nil, fmt.Errorf("could not get response data: %w", err) } return mapstr.M{ diff --git a/metricbeat/module/ceph/mgr_cluster_health/data.go b/metricbeat/module/ceph/mgr_cluster_health/data.go index ad4116a6296..0a0b9505902 100644 --- a/metricbeat/module/ceph/mgr_cluster_health/data.go +++ b/metricbeat/module/ceph/mgr_cluster_health/data.go @@ -18,7 +18,7 @@ package mgr_cluster_health import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/module/ceph/mgr" "github.com/elastic/elastic-agent-libs/mapstr" @@ -42,13 +42,13 @@ func eventMapping(statusContent, timeSyncStatusContent []byte) (mapstr.M, error) var statusResponse StatusResponse err := mgr.UnmarshalResponse(statusContent, &statusResponse) if err != nil { - return nil, errors.Wrap(err, "could not unmarshal response") + return nil, fmt.Errorf("could not unmarshal response: %w", err) } var timeSyncStatusResponse TimeSyncStatusResponse err = mgr.UnmarshalResponse(timeSyncStatusContent, &timeSyncStatusResponse) if err != nil { - return nil, errors.Wrap(err, "could not unmarshal response") + return nil, fmt.Errorf("could not unmarshal response: %w", err) } return mapstr.M{ diff --git a/metricbeat/module/ceph/mgr_osd_perf/data.go b/metricbeat/module/ceph/mgr_osd_perf/data.go index 20046a38df3..59ba21393e8 100644 --- a/metricbeat/module/ceph/mgr_osd_perf/data.go +++ b/metricbeat/module/ceph/mgr_osd_perf/data.go @@ -18,7 +18,7 @@ package mgr_osd_perf import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/module/ceph/mgr" "github.com/elastic/elastic-agent-libs/mapstr" @@ -42,7 +42,7 @@ func eventsMapping(content []byte) ([]mapstr.M, error) { var response OsdPerfResponse err := mgr.UnmarshalResponse(content, &response) if err != nil { - return nil, errors.Wrap(err, "could not get response data") + return nil, fmt.Errorf("could not get response data: %w", err) } var events []mapstr.M diff --git a/metricbeat/module/ceph/mgr_osd_pool_stats/data.go b/metricbeat/module/ceph/mgr_osd_pool_stats/data.go index 28c62d14789..6c7dc960dd2 100644 ---
b/metricbeat/module/ceph/mgr_osd_pool_stats/data.go @@ -18,7 +18,7 @@ package mgr_osd_pool_stats import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/module/ceph/mgr" "github.com/elastic/elastic-agent-libs/mapstr" @@ -39,7 +39,7 @@ func eventsMapping(content []byte) ([]mapstr.M, error) { var response []OsdPoolStat err := mgr.UnmarshalResponse(content, &response) if err != nil { - return nil, errors.Wrap(err, "could not get response data") + return nil, fmt.Errorf("could not get response data: %w", err) } var events []mapstr.M diff --git a/metricbeat/module/ceph/mgr_osd_tree/data.go b/metricbeat/module/ceph/mgr_osd_tree/data.go index fc1988e4092..85c467b2b26 100644 --- a/metricbeat/module/ceph/mgr_osd_tree/data.go +++ b/metricbeat/module/ceph/mgr_osd_tree/data.go @@ -18,11 +18,10 @@ package mgr_osd_tree import ( + "fmt" "strconv" "strings" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/module/ceph/mgr" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -48,7 +47,7 @@ func eventsMapping(content []byte) ([]mapstr.M, error) { var response OsdTreeResponse err := mgr.UnmarshalResponse(content, &response) if err != nil { - return nil, errors.Wrap(err, "could not get response data") + return nil, fmt.Errorf("could not get response data: %w", err) } nodeList := response.Nodes diff --git a/metricbeat/module/ceph/mgr_pool_disk/data.go b/metricbeat/module/ceph/mgr_pool_disk/data.go index 79db8e01ae6..7daab9b8c0e 100644 --- a/metricbeat/module/ceph/mgr_pool_disk/data.go +++ b/metricbeat/module/ceph/mgr_pool_disk/data.go @@ -18,7 +18,7 @@ package mgr_pool_disk import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/module/ceph/mgr" "github.com/elastic/elastic-agent-libs/mapstr" @@ -41,7 +41,7 @@ func eventsMapping(content []byte) ([]mapstr.M, error) { var response DfResponse err := mgr.UnmarshalResponse(content, &response) if err != nil { - return nil, errors.Wrap(err, "could not get response data") + return nil, fmt.Errorf("could not get response data: %w", err) } var events []mapstr.M diff --git a/metricbeat/module/ceph/monitor_health/data.go b/metricbeat/module/ceph/monitor_health/data.go index 6986c6bd15d..2ee58a86409 100644 --- a/metricbeat/module/ceph/monitor_health/data.go +++ b/metricbeat/module/ceph/monitor_health/data.go @@ -19,10 +19,9 @@ package monitor_health import ( "encoding/json" + "fmt" "time" - "github.com/pkg/errors" - "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -90,7 +89,7 @@ func eventsMapping(content []byte) ([]mapstr.M, error) { var d HealthRequest err := json.Unmarshal(content, &d) if err != nil { - return nil, errors.Wrapf(err, "could not get HealthRequest data") + return nil, fmt.Errorf("could not get HealthRequest data: %w", err) } events := []mapstr.M{} diff --git a/metricbeat/module/ceph/osd_df/data.go b/metricbeat/module/ceph/osd_df/data.go index 3d82da0b552..80044b88dab 100644 --- a/metricbeat/module/ceph/osd_df/data.go +++ b/metricbeat/module/ceph/osd_df/data.go @@ -19,8 +19,7 @@ package osd_df import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -51,7 +50,7 @@ func eventsMapping(content []byte) ([]mapstr.M, error) { var d OsdDfRequest err := json.Unmarshal(content, &d) if err != nil { - return nil, errors.Wrap(err, "error getting data for OSD_DF") + return nil, fmt.Errorf("error getting data for OSD_DF: %w", err) } nodeList := d.Output.Nodes diff --git a/metricbeat/module/ceph/osd_df/osd_df.go 
b/metricbeat/module/ceph/osd_df/osd_df.go index 333f259b9d9..5675061999b 100644 --- a/metricbeat/module/ceph/osd_df/osd_df.go +++ b/metricbeat/module/ceph/osd_df/osd_df.go @@ -18,7 +18,7 @@ package osd_df import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -70,12 +70,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { content, err := m.HTTP.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } events, err := eventsMapping(content) if err != nil { - return errors.Wrap(err, "error in mapping") + return fmt.Errorf("error in mapping: %w", err) } for _, event := range events { diff --git a/metricbeat/module/ceph/osd_tree/osd_tree.go b/metricbeat/module/ceph/osd_tree/osd_tree.go index 6417ca59d9b..3d159df2556 100644 --- a/metricbeat/module/ceph/osd_tree/osd_tree.go +++ b/metricbeat/module/ceph/osd_tree/osd_tree.go @@ -18,7 +18,7 @@ package osd_tree import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -70,12 +70,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { content, err := m.HTTP.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } events, err := eventsMapping(content) if err != nil { - return errors.Wrap(err, "error in mapping") + return fmt.Errorf("error in mapping: %w", err) } for _, event := range events { diff --git a/metricbeat/module/ceph/pool_disk/pool_disk.go b/metricbeat/module/ceph/pool_disk/pool_disk.go index 4fda029bff4..5e369199bbc 100644 --- a/metricbeat/module/ceph/pool_disk/pool_disk.go +++ b/metricbeat/module/ceph/pool_disk/pool_disk.go @@ -18,7 +18,7 @@ package pool_disk import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -70,7 +70,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { content, err := m.HTTP.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } events := eventsMapping(content) diff --git a/metricbeat/module/consul/agent/agent.go b/metricbeat/module/consul/agent/agent.go index 2eedcfb5506..600088c0ec7 100644 --- a/metricbeat/module/consul/agent/agent.go +++ b/metricbeat/module/consul/agent/agent.go @@ -18,7 +18,7 @@ package agent import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/helper" @@ -74,12 +74,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(report mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in http fetch") + return fmt.Errorf("error in http fetch: %w", err) } mappings, err := eventMapping(content) if err != nil { - return errors.Wrap(err, "error in event mapping") + return fmt.Errorf("error in event mapping: %w", err) } for _, m := range mappings { diff --git a/metricbeat/module/couchbase/bucket/bucket.go b/metricbeat/module/couchbase/bucket/bucket.go index 643b1ecf41c..5e8afd15c51 100644 --- a/metricbeat/module/couchbase/bucket/bucket.go +++ b/metricbeat/module/couchbase/bucket/bucket.go @@ -18,7 +18,7 @@ 
package bucket import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -70,7 +70,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } events := eventsMapping(content) diff --git a/metricbeat/module/couchbase/cluster/cluster.go b/metricbeat/module/couchbase/cluster/cluster.go index 2d18856c090..e7bb27e8059 100644 --- a/metricbeat/module/couchbase/cluster/cluster.go +++ b/metricbeat/module/couchbase/cluster/cluster.go @@ -18,7 +18,7 @@ package cluster import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -70,7 +70,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } reporter.Event(mb.Event{ diff --git a/metricbeat/module/couchbase/node/node.go b/metricbeat/module/couchbase/node/node.go index cbb1ba7ce06..55edcbc64f6 100644 --- a/metricbeat/module/couchbase/node/node.go +++ b/metricbeat/module/couchbase/node/node.go @@ -18,7 +18,7 @@ package node import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -70,7 +70,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } events := eventsMapping(content) diff --git a/metricbeat/module/couchdb/server/server.go b/metricbeat/module/couchdb/server/server.go index 640bea3a24b..8193b0171c7 100644 --- a/metricbeat/module/couchdb/server/server.go +++ b/metricbeat/module/couchdb/server/server.go @@ -19,6 +19,7 @@ package server import ( "encoding/json" + "fmt" "net/http" "time" @@ -26,8 +27,6 @@ import ( "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" "github.com/elastic/elastic-agent-libs/version" - - "github.com/pkg/errors" ) const ( @@ -91,15 +90,15 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in http fetch") + return fmt.Errorf("error in http fetch: %w", err) } if err = m.retrieveFetcher(); err != nil { - return errors.Wrapf(err, "error trying to get CouchDB version. Retrying on next fetch...") + return fmt.Errorf("error trying to get CouchDB version. 
Retrying on next fetch: %w", err) } event, err := m.fetcher.MapEvent(m.info, content) if err != nil { - return errors.Wrap(err, "error trying to get couchdb data") + return fmt.Errorf("error trying to get couchdb data: %w", err) } reporter.Event(event) @@ -113,12 +112,12 @@ func (m *MetricSet) retrieveFetcher() (err error) { m.info, err = m.getInfoFromCouchdbHost(m.Host()) if err != nil { - return errors.Wrap(err, "cannot start CouchDB metricbeat module") + return fmt.Errorf("cannot start CouchDB metricbeat module: %w", err) } version, err := version.New(m.info.Version) if err != nil { - return errors.Wrap(err, "could not capture couchdb version") + return fmt.Errorf("could not capture couchdb version: %w", err) } m.Logger().Debugf("found couchdb version %d", version.Major) @@ -153,18 +152,18 @@ func (m *MetricSet) getInfoFromCouchdbHost(h string) (*CommonInfo, error) { hostdata, err := hpb.Build()(m.Module(), h) if err != nil { - return nil, errors.Wrap(err, "error using host parser") + return nil, fmt.Errorf("error using host parser: %w", err) } res, err := c.Get(hostdata.URI) if err != nil { - return nil, errors.Wrap(err, "error trying to do GET request to couchdb") + return nil, fmt.Errorf("error trying to do GET request to couchdb: %w", err) } defer res.Body.Close() var info CommonInfo if err = json.NewDecoder(res.Body).Decode(&info); err != nil { - return nil, errors.Wrap(err, "error trying to parse couchdb info") + return nil, fmt.Errorf("error trying to parse couchdb info: %w", err) } return &info, nil diff --git a/metricbeat/module/couchdb/server/v1.go b/metricbeat/module/couchdb/server/v1.go index 2b3f43e3ce8..e9652ecca3a 100644 --- a/metricbeat/module/couchdb/server/v1.go +++ b/metricbeat/module/couchdb/server/v1.go @@ -19,11 +19,10 @@ package server import ( "encoding/json" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/elastic-agent-libs/mapstr" - - "github.com/pkg/errors" ) type V1 struct{} @@ -32,7 +31,7 @@ func (v *V1) MapEvent(info *CommonInfo, in []byte) (mb.Event, error) { var data ServerV1 err := json.Unmarshal(in, &data) if err != nil { - return mb.Event{}, errors.Wrap(err, "error parsing v1 server JSON") + return mb.Event{}, fmt.Errorf("error parsing v1 server JSON: %w", err) } event := mapstr.M{ diff --git a/metricbeat/module/couchdb/server/v2.go b/metricbeat/module/couchdb/server/v2.go index 96e65edda9f..a5be3390db2 100644 --- a/metricbeat/module/couchdb/server/v2.go +++ b/metricbeat/module/couchdb/server/v2.go @@ -19,8 +19,7 @@ package server import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/elastic-agent-libs/mapstr" @@ -32,7 +31,7 @@ func (v *V2) MapEvent(info *CommonInfo, in []byte) (mb.Event, error) { var data ServerV2 err := json.Unmarshal(in, &data) if err != nil { - return mb.Event{}, errors.Wrap(err, "error parsing v2 server JSON") + return mb.Event{}, fmt.Errorf("error parsing v2 server JSON: %w", err) } event := mapstr.M{ diff --git a/metricbeat/module/docker/cpu/cpu.go b/metricbeat/module/docker/cpu/cpu.go index 0ee2d47d545..a29ee8a00cc 100644 --- a/metricbeat/module/docker/cpu/cpu.go +++ b/metricbeat/module/docker/cpu/cpu.go @@ -20,8 +20,9 @@ package cpu import ( + "fmt" + "github.com/docker/docker/client" - "github.com/pkg/errors" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/docker" @@ -74,7 +75,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error {
stats, err := docker.FetchStats(m.dockerClient, m.Module().Config().Timeout) if err != nil { - return errors.Wrap(err, "failed to get docker stats") + return fmt.Errorf("failed to get docker stats: %w", err) } formattedStats := m.cpuService.getCPUStatsList(stats, m.dedot) diff --git a/metricbeat/module/docker/healthcheck/data.go b/metricbeat/module/docker/healthcheck/data.go index 3f42c270fbe..e1ada5b36ee 100644 --- a/metricbeat/module/docker/healthcheck/data.go +++ b/metricbeat/module/docker/healthcheck/data.go @@ -22,7 +22,6 @@ import ( "strings" "github.com/docker/docker/api/types" - "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/metricbeat/mb" @@ -43,7 +42,6 @@ func eventMapping(r mb.ReporterV2, cont *types.Container, m *MetricSet) { container, err := m.dockerClient.ContainerInspect(context.TODO(), cont.ID) if err != nil { - errors.Wrapf(err, "Error inspecting container %v", cont.ID) return } diff --git a/metricbeat/module/docker/healthcheck/healthcheck.go b/metricbeat/module/docker/healthcheck/healthcheck.go index d37781bf48a..b6c63c3f2e4 100644 --- a/metricbeat/module/docker/healthcheck/healthcheck.go +++ b/metricbeat/module/docker/healthcheck/healthcheck.go @@ -21,10 +21,10 @@ package healthcheck import ( "context" + "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/client" - "github.com/pkg/errors" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/docker" @@ -68,7 +68,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { // Fetch a list of all containers. containers, err := m.dockerClient.ContainerList(context.TODO(), types.ContainerListOptions{}) if err != nil { - return errors.Wrap(err, "failed to get docker containers list") + return fmt.Errorf("failed to get docker containers list: %w", err) } eventsMapping(r, containers, m) diff --git a/metricbeat/module/docker/memory/memory.go b/metricbeat/module/docker/memory/memory.go index 56bfc7d7c5d..a4f56138972 100644 --- a/metricbeat/module/docker/memory/memory.go +++ b/metricbeat/module/docker/memory/memory.go @@ -23,7 +23,6 @@ import ( "fmt" "github.com/docker/docker/client" - "github.com/pkg/errors" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/docker" @@ -68,7 +67,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { stats, err := docker.FetchStats(m.dockerClient, m.Module().Config().Timeout) if err != nil { - return errors.Wrap(err, "failed to get docker stats") + return fmt.Errorf("failed to get docker stats: %w", err) } memoryStats := m.memoryService.getMemoryStatsList(stats, m.dedot) diff --git a/metricbeat/module/docker/network/network.go b/metricbeat/module/docker/network/network.go index 239eef015f9..8a70fd12446 100644 --- a/metricbeat/module/docker/network/network.go +++ b/metricbeat/module/docker/network/network.go @@ -20,8 +20,9 @@ package network import ( + "fmt" + "github.com/docker/docker/client" - "github.com/pkg/errors" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/docker" @@ -67,7 +68,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { stats, err := docker.FetchStats(m.dockerClient, m.Module().Config().Timeout) if err != nil { - return errors.Wrap(err, "failed to get docker stats") + return fmt.Errorf("failed to get docker stats: %w", err) } formattedStats := 
m.netService.getNetworkStatsPerContainer(stats, m.dedot) diff --git a/metricbeat/module/docker/network_summary/helper.go b/metricbeat/module/docker/network_summary/helper.go index 63cb5793d44..694178b6147 100644 --- a/metricbeat/module/docker/network_summary/helper.go +++ b/metricbeat/module/docker/network_summary/helper.go @@ -29,7 +29,6 @@ import ( "time" "github.com/docker/docker/client" - "github.com/pkg/errors" "github.com/elastic/go-sysinfo" @@ -55,19 +54,23 @@ func fetchContainerNetStats(client *client.Client, timeout time.Duration, contai inspect, err := client.ContainerInspect(ctx, container) if err != nil { - return nil, errors.Wrapf(err, "error fetching stats for container %s", container) + return nil, fmt.Errorf("error fetching stats for container %s: %w", container, err) } rootPID := inspect.ContainerJSONBase.State.Pid proc, err := sysinfo.Process(rootPID) + if err != nil { + return nil, fmt.Errorf("cannot fetch process information for PID %d: %w", rootPID, err) + } + procNet, ok := proc.(sysinfotypes.NetworkCounters) if !ok { - return nil, errors.Wrapf(err, "cannot fetch network counters for PID %d", rootPID) + return nil, fmt.Errorf("cannot fetch network counters for PID %d", rootPID) } counters, err := procNet.NetworkCounters() if err != nil { - return &sysinfotypes.NetworkCountersInfo{}, errors.Wrapf(err, "error fetching network counters for PID %d", rootPID) + return &sysinfotypes.NetworkCountersInfo{}, fmt.Errorf("error fetching network counters for PID %d: %w", rootPID, err) } return counters, nil @@ -78,7 +81,7 @@ func fetchContainerNetStats(client *client.Client, timeout time.Duration, contai func fetchNamespace(pid int) (int, error) { nsLink, err := os.Readlink(filepath.Join("/proc/", fmt.Sprintf("%d", pid), "/ns/net")) if err != nil { - return 0, errors.Wrap(err, "error reading network namespace link") + return 0, fmt.Errorf("error reading network namespace link: %w", err) } nsidString := nsRegex.FindString(nsLink) // This is minor metadata, so don't consider it an error @@ -88,7 +91,7 @@ func fetchNamespace(pid int) (int, error) { nsID, err := strconv.Atoi(nsidString) if err != nil { - return 0, errors.Wrapf(err, "error converting %s to int", nsidString) + return 0, fmt.Errorf("error converting %s to int: %w", nsidString, err) } return nsID, nil } diff --git a/metricbeat/module/docker/network_summary/network_summary.go b/metricbeat/module/docker/network_summary/network_summary.go index 090869d1d8d..9753b449add 100644 --- a/metricbeat/module/docker/network_summary/network_summary.go +++ b/metricbeat/module/docker/network_summary/network_summary.go @@ -21,10 +21,10 @@ package network_summary import ( "context" + "fmt" "runtime" "github.com/docker/docker/client" - "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/mb" @@ -86,7 +86,7 @@ func (m *MetricSet) Fetch(ctx context.Context, report mb.ReporterV2) error { stats, err := docker.FetchStats(m.dockerClient, m.Module().Config().Timeout) if err != nil { - return errors.Wrap(err, "failed to get docker stats") + return fmt.Errorf("failed to get docker stats: %w", err) } for _, myStats := range stats { @@ -96,19 +96,19 @@ func (m *MetricSet) Fetch(ctx context.Context, report mb.ReporterV2) error { inspect, err := m.dockerClient.ContainerInspect(ctx, myStats.Container.ID) if err != nil { - return errors.Wrapf(err, "error fetching stats for container %s", myStats.Container.ID) + return fmt.Errorf("error fetching stats for container %s: %w", 
myStats.Container.ID, err) } rootPID := inspect.ContainerJSONBase.State.Pid netNS, err := fetchNamespace(rootPID) if err != nil { - return errors.Wrapf(err, "error fetching namespace for PID %d", rootPID) + return fmt.Errorf("error fetching namespace for PID %d: %w", rootPID, err) } networkStats, err := fetchContainerNetStats(m.dockerClient, m.Module().Config().Timeout, myStats.Container.ID) if err != nil { - return errors.Wrap(err, "error fetching per-PID stats") + return fmt.Errorf("error fetching per-PID stats: %w", err) } summary := network.MapProcNetCounters(networkStats) diff --git a/metricbeat/module/elasticsearch/ccr/data.go b/metricbeat/module/elasticsearch/ccr/data.go index 3f917b3f6bd..5301f6e217c 100644 --- a/metricbeat/module/elasticsearch/ccr/data.go +++ b/metricbeat/module/elasticsearch/ccr/data.go @@ -19,12 +19,12 @@ package ccr import ( "encoding/json" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper/elastic" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/joeshaw/multierror" - "github.com/pkg/errors" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -139,7 +139,7 @@ func eventsMapping(r mb.ReporterV2, info elasticsearch.Info, content []byte, isX var data response err := json.Unmarshal(content, &data) if err != nil { - return errors.Wrap(err, "failure parsing Elasticsearch CCR Stats API response") + return fmt.Errorf("failure parsing Elasticsearch CCR Stats API response: %w", err) } var errs multierror.Errors diff --git a/metricbeat/module/elasticsearch/cluster_stats/data.go b/metricbeat/module/elasticsearch/cluster_stats/data.go index 5051ec20af0..2853dd3466f 100644 --- a/metricbeat/module/elasticsearch/cluster_stats/data.go +++ b/metricbeat/module/elasticsearch/cluster_stats/data.go @@ -24,8 +24,6 @@ import ( "sort" "strings" - "github.com/pkg/errors" - s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" "github.com/elastic/beats/v7/metricbeat/helper" @@ -205,12 +203,12 @@ func getClusterMetadataSettings(httpClient *helper.HTTP) (mapstr.M, error) { filterPaths := []string{"*.cluster.metadata.display_name"} clusterSettings, err := elasticsearch.GetClusterSettingsWithDefaults(httpClient, httpClient.GetURI(), filterPaths) if err != nil { - return nil, errors.Wrap(err, "failure to get cluster settings") + return nil, fmt.Errorf("failure to get cluster settings: %w", err) } clusterSettings, err = elasticsearch.MergeClusterSettings(clusterSettings) if err != nil { - return nil, errors.Wrap(err, "failure to merge cluster settings") + return nil, fmt.Errorf("failure to merge cluster settings: %w", err) } return clusterSettings, nil @@ -220,7 +218,7 @@ func eventMapping(r mb.ReporterV2, httpClient *helper.HTTP, info elasticsearch.I var data map[string]interface{} err := json.Unmarshal(content, &data) if err != nil { - return errors.Wrap(err, "failure parsing Elasticsearch Cluster Stats API response") + return fmt.Errorf("failure parsing Elasticsearch Cluster Stats API response: %w", err) } clusterStats := mapstr.M(data) @@ -228,48 +226,48 @@ func eventMapping(r mb.ReporterV2, httpClient *helper.HTTP, info elasticsearch.I license, err := elasticsearch.GetLicense(httpClient, httpClient.GetURI()) if err != nil { - return errors.Wrap(err, "failed to get license from Elasticsearch") + return fmt.Errorf("failed to get license from Elasticsearch: %w", err) } clusterStateMetrics := []string{"version", "master_node", "nodes", "routing_table"}
clusterState, err := elasticsearch.GetClusterState(httpClient, httpClient.GetURI(), clusterStateMetrics) if err != nil { - return errors.Wrap(err, "failed to get cluster state from Elasticsearch") + return fmt.Errorf("failed to get cluster state from Elasticsearch: %w", err) } clusterState.Delete("cluster_name") clusterStateReduced := mapstr.M{} if err = elasticsearch.PassThruField("status", clusterStats, clusterStateReduced); err != nil { - return errors.Wrap(err, "failed to pass through status field") + return fmt.Errorf("failed to pass through status field: %w", err) } clusterStateReduced.Delete("status") if err = elasticsearch.PassThruField("master_node", clusterState, clusterStateReduced); err != nil { - return errors.Wrap(err, "failed to pass through master_node field") + return fmt.Errorf("failed to pass through master_node field: %w", err) } if err = elasticsearch.PassThruField("state_uuid", clusterState, clusterStateReduced); err != nil { - return errors.Wrap(err, "failed to pass through state_uuid field") + return fmt.Errorf("failed to pass through state_uuid field: %w", err) } if err = elasticsearch.PassThruField("nodes", clusterState, clusterStateReduced); err != nil { - return errors.Wrap(err, "failed to pass through nodes field") + return fmt.Errorf("failed to pass through nodes field: %w", err) } nodesHash, err := computeNodesHash(clusterState) if err != nil { - return errors.Wrap(err, "failed to compute nodes hash") + return fmt.Errorf("failed to compute nodes hash: %w", err) } clusterStateReduced.Put("nodes_hash", nodesHash) usage, err := elasticsearch.GetStackUsage(httpClient, httpClient.GetURI()) if err != nil { - return errors.Wrap(err, "failed to get stack usage from Elasticsearch") + return fmt.Errorf("failed to get stack usage from Elasticsearch: %w", err) } clusterNeedsTLS, err := clusterNeedsTLSEnabled(license, usage) if err != nil { - return errors.Wrap(err, "failed to determine if cluster needs TLS enabled") + return fmt.Errorf("failed to determine if cluster needs TLS enabled: %w", err) } l := license.ToMapStr() @@ -277,7 +275,7 @@ func eventMapping(r mb.ReporterV2, httpClient *helper.HTTP, info elasticsearch.I isAPMFound, err := apmIndicesExist(clusterState) if err != nil { - return errors.Wrap(err, "failed to determine if APM indices exist") + return fmt.Errorf("failed to determine if APM indices exist: %w", err) } delete(clusterState, "routing_table") // We don't want to index the routing table in monitoring indices @@ -311,7 +309,7 @@ func eventMapping(r mb.ReporterV2, httpClient *helper.HTTP, info elasticsearch.I metricSetFields.Put("state", clusterStateReduced) if err = elasticsearch.PassThruField("version", clusterState, event.ModuleFields); err != nil { - return errors.Wrap(err, "failed to pass through version field") + return fmt.Errorf("failed to pass through version field: %w", err) } event.MetricSetFields = metricSetFields diff --git a/metricbeat/module/elasticsearch/elasticsearch.go b/metricbeat/module/elasticsearch/elasticsearch.go index 9cd316a41d3..00737472f2c 100644 --- a/metricbeat/module/elasticsearch/elasticsearch.go +++ b/metricbeat/module/elasticsearch/elasticsearch.go @@ -19,6 +19,7 @@ package elasticsearch import ( "encoding/json" + "errors" "fmt" "net/url" "strconv" @@ -26,8 +27,6 @@ import ( "sync" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/helper/elastic" "github.com/elastic/beats/v7/metricbeat/mb" @@ -390,7 +389,7 @@ func GetIndicesSettings(http 
*helper.HTTP, resetURI string) (map[string]IndexSet content, err := fetchPath(http, resetURI, "*/_settings", "filter_path=*.settings.index.hidden&expand_wildcards=all") if err != nil { - return nil, errors.Wrap(err, "could not fetch indices settings") + return nil, fmt.Errorf("could not fetch indices settings: %w", err) } var resp map[string]struct { @@ -403,7 +402,7 @@ func GetIndicesSettings(http *helper.HTTP, resetURI string) (map[string]IndexSet err = json.Unmarshal(content, &resp) if err != nil { - return nil, errors.Wrap(err, "could not parse indices settings response") + return nil, fmt.Errorf("could not parse indices settings response: %w", err) } ret := make(map[string]IndexSettings, len(resp)) @@ -604,7 +603,7 @@ func (l *License) ToMapStr() mapstr.M { func getSettingGroup(allSettings mapstr.M, groupKey string) (mapstr.M, error) { hasSettingGroup, err := allSettings.HasKey(groupKey) if err != nil { - return nil, errors.Wrap(err, "failure to determine if "+groupKey+" settings exist") + return nil, fmt.Errorf("failure to determine if %s settings exist: %w", groupKey, err) } if !hasSettingGroup { @@ -613,12 +612,12 @@ func getSettingGroup(allSettings mapstr.M, groupKey string) (mapstr.M, error) { settings, err := allSettings.GetValue(groupKey) if err != nil { - return nil, errors.Wrap(err, "failure to extract "+groupKey+" settings") + return nil, fmt.Errorf("failure to extract %s settings: %w", groupKey, err) } v, ok := settings.(map[string]interface{}) if !ok { - return nil, errors.Wrap(err, groupKey+" settings are not a map") + return nil, fmt.Errorf("%s settings are not a map", groupKey) } return mapstr.M(v), nil diff --git a/metricbeat/module/elasticsearch/elasticsearch_integration_test.go b/metricbeat/module/elasticsearch/elasticsearch_integration_test.go index a3309b41ac6..3d102a2fd62 100644 --- a/metricbeat/module/elasticsearch/elasticsearch_integration_test.go +++ b/metricbeat/module/elasticsearch/elasticsearch_integration_test.go @@ -22,6 +22,7 @@ package elasticsearch_test import ( "bytes" "encoding/json" + "errors" "fmt" "io/ioutil" "math/rand" @@ -30,8 +31,6 @@ import ( "testing" "time" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/tests/compose" @@ -205,14 +204,14 @@ func createIndex(host string, isHidden bool) (string, error) { req, err := http.NewRequest("PUT", fmt.Sprintf("http://%v/%v", host, indexName), strings.NewReader(reqBody)) if err != nil { - return "", errors.Wrap(err, "could not build create index request") + return "", fmt.Errorf("could not build create index request: %w", err) } req.Header.Add("Content-Type", "application/json") client := &http.Client{} resp, err := client.Do(req) if err != nil { - return "", errors.Wrap(err, "could not send create index request") + return "", fmt.Errorf("could not send create index request: %w", err) } defer resp.Body.Close() respBody, err := ioutil.ReadAll(resp.Body) @@ -321,7 +320,7 @@ func createMLJob(host string, version *version.V) error { body, resp, err := httpPutJSON(host, jobURL, mlJob) if err != nil { - return errors.Wrap(err, "error doing PUT request when creating ML job") + return fmt.Errorf("error doing PUT request when creating ML job: %w", err) } if resp.StatusCode != 200 { @@ -334,17 +333,17 @@ func createMLJob(host string, version *version.V) error { func createCCRStats(host string) error { err := setupCCRRemote(host) if err != nil { - return errors.Wrap(err, "error setup CCR remote settings") + return fmt.Errorf("error setup CCR remote settings: %w", err) }
err = createCCRLeaderIndex(host) if err != nil { - return errors.Wrap(err, "error creating CCR leader index") + return fmt.Errorf("error creating CCR leader index: %w", err) } err = createCCRFollowerIndex(host) if err != nil { - return errors.Wrap(err, "error creating CCR follower index") + return fmt.Errorf("error creating CCR follower index: %w", err) } // Give ES sufficient time to do the replication and produce stats @@ -354,7 +353,7 @@ func createCCRStats(host string) error { exists, err := waitForSuccess(checkCCRStats, 500*time.Millisecond, 10) if err != nil { - return errors.Wrap(err, "error checking if CCR stats exist") + return fmt.Errorf("error checking if CCR stats exist: %w", err) } if !exists { @@ -439,27 +438,27 @@ func checkExists(url string) bool { func createEnrichStats(host string) error { err := createEnrichSourceIndex(host) if err != nil { - return errors.Wrap(err, "error creating enrich source index") + return fmt.Errorf("error creating enrich source index: %w", err) } err = createEnrichPolicy(host) if err != nil { - return errors.Wrap(err, "error creating enrich policy") + return fmt.Errorf("error creating enrich policy: %w", err) } err = executeEnrichPolicy(host) if err != nil { - return errors.Wrap(err, "error executing enrich policy") + return fmt.Errorf("error executing enrich policy: %w", err) } err = createEnrichIngestPipeline(host) if err != nil { - return errors.Wrap(err, "error creating ingest pipeline with enrich processor") + return fmt.Errorf("error creating ingest pipeline with enrich processor: %w", err) } err = ingestAndEnrichDoc(host) if err != nil { - return errors.Wrap(err, "error ingesting doc for enrichment") + return fmt.Errorf("error ingesting doc for enrichment: %w", err) } return nil diff --git a/metricbeat/module/elasticsearch/enrich/enrich.go b/metricbeat/module/elasticsearch/enrich/enrich.go index ecb6e37b7b5..e13dcd1cad9 100644 --- a/metricbeat/module/elasticsearch/enrich/enrich.go +++ b/metricbeat/module/elasticsearch/enrich/enrich.go @@ -18,10 +18,9 @@ package enrich import ( + "fmt" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/helper/elastic" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/elasticsearch" @@ -70,7 +69,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { enrichUnavailableMessage, err := m.checkEnrichAvailability(info.Version.Number) if err != nil { - return errors.Wrap(err, "error determining if Enrich is available") + return fmt.Errorf("error determining if Enrich is available: %w", err) } if enrichUnavailableMessage != "" { diff --git a/metricbeat/module/elasticsearch/index/data.go b/metricbeat/module/elasticsearch/index/data.go index 4b776444f4b..bcd4aeb3b6e 100644 --- a/metricbeat/module/elasticsearch/index/data.go +++ b/metricbeat/module/elasticsearch/index/data.go @@ -22,7 +22,6 @@ import ( "fmt" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/helper/elastic" @@ -182,17 +181,17 @@ func eventsMapping(r mb.ReporterV2, httpClient *helper.HTTP, info elasticsearch.
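The `createCCRStats` fixes above use `fmt.Errorf` with `%w` rather than a bare `errors.New`, so the cause is not silently discarded. What that buys callers, in a hedged stdlib-only sketch (the `parse` helper is made up; only the message mirrors the CCR hunk earlier):

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// parse stands in for one of the eventsMapping-style functions above.
func parse(content []byte) error {
	var data map[string]interface{}
	if err := json.Unmarshal(content, &data); err != nil {
		return fmt.Errorf("failure parsing Elasticsearch CCR Stats API response: %w", err)
	}
	return nil
}

func main() {
	err := parse([]byte("{not json"))

	// The typed cause survives the wrapping and is still matchable.
	var syntaxErr *json.SyntaxError
	fmt.Println(errors.As(err, &syntaxErr)) // true
}
```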
clusterStateMetrics := []string{"routing_table"} clusterState, err := elasticsearch.GetClusterState(httpClient, httpClient.GetURI(), clusterStateMetrics) if err != nil { - return errors.Wrap(err, "failure retrieving cluster state from Elasticsearch") + return fmt.Errorf("failure retrieving cluster state from Elasticsearch: %w", err) } var indicesStats stats if err := parseAPIResponse(content, &indicesStats); err != nil { - return errors.Wrap(err, "failure parsing Indices Stats Elasticsearch API response") + return fmt.Errorf("failure parsing Indices Stats Elasticsearch API response: %w", err) } indicesSettings, err := elasticsearch.GetIndicesSettings(httpClient, httpClient.GetURI()) if err != nil { - return errors.Wrap(err, "failure retrieving indices settings from Elasticsearch") + return fmt.Errorf("failure retrieving indices settings from Elasticsearch: %w", err) } var errs multierror.Errors @@ -209,7 +208,7 @@ func eventsMapping(r mb.ReporterV2, httpClient *helper.HTTP, info elasticsearch. err = addClusterStateFields(&idx, clusterState) if err != nil { - errs = append(errs, errors.Wrap(err, "failure adding cluster state fields")) + errs = append(errs, fmt.Errorf("failure adding cluster state fields: %w", err)) continue } @@ -220,12 +219,12 @@ func eventsMapping(r mb.ReporterV2, httpClient *helper.HTTP, info elasticsearch. // metricset level indexBytes, err := json.Marshal(idx) if err != nil { - errs = append(errs, errors.Wrap(err, "failure trying to convert metrics results to JSON")) + errs = append(errs, fmt.Errorf("failure trying to convert metrics results to JSON: %w", err)) continue } var indexOutput mapstr.M if err = json.Unmarshal(indexBytes, &indexOutput); err != nil { - errs = append(errs, errors.Wrap(err, "failure trying to convert JSON metrics back to mapstr")) + errs = append(errs, fmt.Errorf("failure trying to convert JSON metrics back to mapstr: %w", err)) continue } @@ -255,12 +254,12 @@ func parseAPIResponse(content []byte, indicesStats *stats) error { func addClusterStateFields(idx *Index, clusterState mapstr.M) error { indexRoutingTable, err := getClusterStateMetricForIndex(clusterState, idx.Index, "routing_table") if err != nil { - return errors.Wrap(err, "failed to get index routing table from cluster state") + return fmt.Errorf("failed to get index routing table from cluster state: %w", err) } shards, err := getShardsFromRoutingTable(indexRoutingTable) if err != nil { - return errors.Wrap(err, "failed to get shards from routing table") + return fmt.Errorf("failed to get shards from routing table: %w", err) } // "index_stats.version.created", <--- don't think this is being used in the UI, so can we skip it? @@ -268,13 +267,13 @@ func addClusterStateFields(idx *Index, clusterState mapstr.M) error { status, err := getIndexStatus(shards) if err != nil { - return errors.Wrap(err, "failed to get index status") + return fmt.Errorf("failed to get index status: %w", err) } idx.Status = status shardStats, err := getIndexShardStats(shards) if err != nil { - return errors.Wrap(err, "failed to get index shard stats") + return fmt.Errorf("failed to get index shard stats: %w", err) } idx.Shards = *shardStats return nil @@ -284,7 +283,7 @@ func getClusterStateMetricForIndex(clusterState mapstr.M, index, metricKey strin fieldKey := metricKey + ".indices." 
+ index value, err := clusterState.GetValue(fieldKey) if err != nil { - return nil, errors.Wrap(err, "'"+fieldKey+"'") + return nil, fmt.Errorf("'%s': %w", fieldKey, err) } metric, ok := value.(map[string]interface{}) diff --git a/metricbeat/module/elasticsearch/index/index.go b/metricbeat/module/elasticsearch/index/index.go index 0f128b56600..050ad311c85 100644 --- a/metricbeat/module/elasticsearch/index/index.go +++ b/metricbeat/module/elasticsearch/index/index.go @@ -18,11 +18,10 @@ package index import ( + "fmt" "net/url" "strings" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/elasticsearch" "github.com/elastic/elastic-agent-libs/version" @@ -73,7 +72,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { info, err := elasticsearch.GetInfo(m.HTTP, m.HostData().SanitizedURI) if err != nil { - return errors.Wrap(err, "failed to get info from Elasticsearch") + return fmt.Errorf("failed to get info from Elasticsearch: %w", err) } if err := m.updateServicePath(*info.Version.Number); err != nil { diff --git a/metricbeat/module/elasticsearch/index_summary/index_summary.go b/metricbeat/module/elasticsearch/index_summary/index_summary.go index 5a75916133e..c74b744f238 100644 --- a/metricbeat/module/elasticsearch/index_summary/index_summary.go +++ b/metricbeat/module/elasticsearch/index_summary/index_summary.go @@ -18,7 +18,7 @@ package index_summary import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -77,7 +77,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { info, err := elasticsearch.GetInfo(m.HTTP, m.HostData().SanitizedURI+statsPath) if err != nil { - return errors.Wrap(err, "failed to get info from Elasticsearch") + return fmt.Errorf("failed to get info from Elasticsearch: %w", err) } return eventMapping(r, info, content, m.XPackEnabled) diff --git a/metricbeat/module/elasticsearch/ingest_pipeline/data.go b/metricbeat/module/elasticsearch/ingest_pipeline/data.go index 8cdb90ecf1d..6cd660f040d 100644 --- a/metricbeat/module/elasticsearch/ingest_pipeline/data.go +++ b/metricbeat/module/elasticsearch/ingest_pipeline/data.go @@ -19,8 +19,7 @@ package ingest_pipeline import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/elasticsearch" @@ -60,7 +59,7 @@ type PipelineStat struct { func eventsMapping(r mb.ReporterV2, info elasticsearch.Info, content []byte, isXpack bool, sampleProcessors bool) error { var nodeIngestStats Stats if err := json.Unmarshal(content, &nodeIngestStats); err != nil { - return errors.Wrap(err, "failure parsing Node Ingest Stats API response") + return fmt.Errorf("failure parsing Node Ingest Stats API response: %w", err) } for nodeId, nodeStats := range nodeIngestStats.Nodes { diff --git a/metricbeat/module/elasticsearch/ingest_pipeline/ingest_pipeline.go b/metricbeat/module/elasticsearch/ingest_pipeline/ingest_pipeline.go index ce2ffba9f82..c7aac1ad214 100644 --- a/metricbeat/module/elasticsearch/ingest_pipeline/ingest_pipeline.go +++ b/metricbeat/module/elasticsearch/ingest_pipeline/ingest_pipeline.go @@ -18,11 +18,10 @@ package ingest_pipeline import ( + "fmt" "math" "net/url" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/elasticsearch" @@ -113,7 +112,7 @@ func (m *IngestMetricSet)
Fetch(report mb.ReporterV2) error { info, err := elasticsearch.GetInfo(m.HTTP, m.HostData().SanitizedURI) if err != nil { - return errors.Wrap(err, "failed to get info from Elasticsearch") + return fmt.Errorf("failed to get info from Elasticsearch: %w", err) } m.fetchCounter++ // It's fine if this overflows, it's only used for modulo diff --git a/metricbeat/module/elasticsearch/metricset.go b/metricbeat/module/elasticsearch/metricset.go index 3dcb456b357..363a9634f05 100644 --- a/metricbeat/module/elasticsearch/metricset.go +++ b/metricbeat/module/elasticsearch/metricset.go @@ -19,10 +19,9 @@ package elasticsearch import ( "encoding/json" + "errors" "fmt" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/common/productorigin" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -134,7 +133,7 @@ func (m *MetricSet) ShouldSkipFetch() (bool, error) { if m.Scope == ScopeNode { isMaster, err := isMaster(m.HTTP, m.GetServiceURI()) if err != nil { - return false, errors.Wrap(err, "error determining if connected Elasticsearch node is master") + return false, fmt.Errorf("error determining if connected Elasticsearch node is master: %w", err) } // Not master, no event sent diff --git a/metricbeat/module/elasticsearch/ml_job/data.go b/metricbeat/module/elasticsearch/ml_job/data.go index f70321606b9..7954d106b25 100644 --- a/metricbeat/module/elasticsearch/ml_job/data.go +++ b/metricbeat/module/elasticsearch/ml_job/data.go @@ -19,9 +19,9 @@ package ml_job import ( "encoding/json" + "fmt" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "github.com/elastic/beats/v7/metricbeat/helper/elastic" "github.com/elastic/elastic-agent-libs/mapstr" @@ -58,7 +58,7 @@ func eventsMapping(r mb.ReporterV2, info elasticsearch.Info, content []byte, isX jobsData := &jobsStruct{} err := json.Unmarshal(content, jobsData) if err != nil { - return errors.Wrap(err, "failure parsing Elasticsearch ML Job Stats API response") + return fmt.Errorf("failure parsing Elasticsearch ML Job Stats API response: %w", err) } var errs multierror.Errors diff --git a/metricbeat/module/elasticsearch/node/data.go b/metricbeat/module/elasticsearch/node/data.go index 742a2f056da..4576cc4ac88 100644 --- a/metricbeat/module/elasticsearch/node/data.go +++ b/metricbeat/module/elasticsearch/node/data.go @@ -19,12 +19,12 @@ package node import ( "encoding/json" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper/elastic" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/joeshaw/multierror" - "github.com/pkg/errors" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -71,7 +71,7 @@ func eventsMapping(r mb.ReporterV2, info elasticsearch.Info, content []byte, isX err := json.Unmarshal(content, &nodesStruct) if err != nil { - return errors.Wrap(err, "failure parsing Elasticsearch Node Stats API response") + return fmt.Errorf("failure parsing Elasticsearch Node Stats API response: %w", err) } var errs multierror.Errors @@ -87,7 +87,7 @@ func eventsMapping(r mb.ReporterV2, info elasticsearch.Info, content []byte, isX event.MetricSetFields, err = schema.Apply(node) if err != nil { - errs = append(errs, errors.Wrap(err, "failure applying node schema")) + errs = append(errs, fmt.Errorf("failure applying node schema: %w", err)) continue } diff --git a/metricbeat/module/elasticsearch/node/node.go b/metricbeat/module/elasticsearch/node/node.go index 58cf6ae4bf2..c9fa0b86d06 100644 --- 
a/metricbeat/module/elasticsearch/node/node.go +++ b/metricbeat/module/elasticsearch/node/node.go @@ -18,7 +18,7 @@ package node import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -71,7 +71,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { info, err := elasticsearch.GetInfo(m.HTTP, m.HostData().SanitizedURI+nodeStatsPath) if err != nil { - return errors.Wrap(err, "failed to get info from Elasticsearch") + return fmt.Errorf("failed to get info from Elasticsearch: %w", err) } return eventsMapping(r, info, content, m.XPackEnabled) diff --git a/metricbeat/module/elasticsearch/pending_tasks/data.go b/metricbeat/module/elasticsearch/pending_tasks/data.go index 1e09dd169f6..a75c2dea6b6 100644 --- a/metricbeat/module/elasticsearch/pending_tasks/data.go +++ b/metricbeat/module/elasticsearch/pending_tasks/data.go @@ -19,9 +19,9 @@ package pending_tasks import ( "encoding/json" + "fmt" "github.com/joeshaw/multierror" - "github.com/pkg/errors" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -47,7 +47,7 @@ func eventsMapping(r mb.ReporterV2, info elasticsearch.Info, content []byte, isX err := json.Unmarshal(content, &tasksStruct) if err != nil { - return errors.Wrap(err, "failure parsing Elasticsearch Pending Tasks API response") + return fmt.Errorf("failure parsing Elasticsearch Pending Tasks API response: %w", err) } if tasksStruct.Tasks == nil { @@ -67,7 +67,7 @@ func eventsMapping(r mb.ReporterV2, info elasticsearch.Info, content []byte, isX event.MetricSetFields, err = schema.Apply(task) if err != nil { - errs = append(errs, errors.Wrap(err, "failure applying task schema")) + errs = append(errs, fmt.Errorf("failure applying task schema: %w", err)) continue } diff --git a/metricbeat/module/envoyproxy/server/server.go b/metricbeat/module/envoyproxy/server/server.go index b37cd6dbcfd..76659531bce 100644 --- a/metricbeat/module/envoyproxy/server/server.go +++ b/metricbeat/module/envoyproxy/server/server.go @@ -18,7 +18,7 @@ package server import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -71,7 +71,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in http fetch") + return fmt.Errorf("error in http fetch: %w", err) } event, _ := eventMapping(content) diff --git a/metricbeat/module/etcd/self/self.go b/metricbeat/module/etcd/self/self.go index 066f4048079..b42e7287cde 100644 --- a/metricbeat/module/etcd/self/self.go +++ b/metricbeat/module/etcd/self/self.go @@ -18,7 +18,7 @@ package self import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -74,7 +74,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in http fetch") + return fmt.Errorf("error in http fetch: %w", err) } reporter.Event(mb.Event{ diff --git a/metricbeat/module/etcd/store/store.go b/metricbeat/module/etcd/store/store.go index 00662ccbd00..6234806a6b1 100644 --- a/metricbeat/module/etcd/store/store.go +++ b/metricbeat/module/etcd/store/store.go @@ -18,7 +18,7 @@ package store import ( - 
"github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -73,7 +73,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in http fetch") + return fmt.Errorf("error in http fetch: %w", err) } reporter.Event(mb.Event{ diff --git a/metricbeat/module/golang/expvar/expvar.go b/metricbeat/module/golang/expvar/expvar.go index 0de02b78c3f..07243b0be14 100644 --- a/metricbeat/module/golang/expvar/expvar.go +++ b/metricbeat/module/golang/expvar/expvar.go @@ -18,7 +18,7 @@ package expvar import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -86,7 +86,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { json, err := m.http.FetchJSON() if err != nil { - return errors.Wrap(err, "error in http fetch") + return fmt.Errorf("error in http fetch: %w", err) } //flatten cmdline diff --git a/metricbeat/module/golang/heap/heap.go b/metricbeat/module/golang/heap/heap.go index e7974731e11..5cac1256f72 100644 --- a/metricbeat/module/golang/heap/heap.go +++ b/metricbeat/module/golang/heap/heap.go @@ -19,8 +19,7 @@ package heap import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -81,14 +80,14 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { data, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in http fetch") + return fmt.Errorf("error in http fetch: %w", err) } var stats Stats err = json.Unmarshal(data, &stats) if err != nil { - return errors.Wrap(err, "error unmarshalling json") + return fmt.Errorf("error unmarshalling json: %w", err) } reporter.Event(mb.Event{ diff --git a/metricbeat/module/graphite/server/server.go b/metricbeat/module/graphite/server/server.go index 0fdc4f62e83..38857766f83 100644 --- a/metricbeat/module/graphite/server/server.go +++ b/metricbeat/module/graphite/server/server.go @@ -18,7 +18,7 @@ package server import ( - "github.com/pkg/errors" + "fmt" serverhelper "github.com/elastic/beats/v7/metricbeat/helper/server" "github.com/elastic/beats/v7/metricbeat/helper/server/tcp" @@ -80,7 +80,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Run(reporter mb.PushReporter) { // Start event watcher if err := m.server.Start(); err != nil { - err = errors.Wrap(err, "failed to start graphite server") + err = fmt.Errorf("failed to start graphite server: %w", err) logp.Err("%v", err) reporter.Error(err) return diff --git a/metricbeat/module/haproxy/haproxy.go b/metricbeat/module/haproxy/haproxy.go index c14c681bd80..dcc639dcc87 100644 --- a/metricbeat/module/haproxy/haproxy.go +++ b/metricbeat/module/haproxy/haproxy.go @@ -20,6 +20,8 @@ package haproxy import ( "bytes" "encoding/csv" + "errors" + "fmt" "io" "io/ioutil" "net" @@ -28,7 +30,6 @@ import ( "github.com/gocarina/gocsv" "github.com/mitchellh/mapstructure" - "github.com/pkg/errors" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -214,7 +215,7 @@ type Client struct { func NewHaproxyClient(address string, base mb.BaseMetricSet) (*Client, error) { u, err := url.Parse(address) if err != nil { - return nil, 
errors.Wrap(err, "invalid url") + return nil, fmt.Errorf("invalid url: %w", err) } switch u.Scheme { @@ -229,7 +230,7 @@ func NewHaproxyClient(address string, base mb.BaseMetricSet) (*Client, error) { } return &Client{&httpProto{HTTP: http}}, nil default: - return nil, errors.Errorf("invalid protocol scheme: %s", u.Scheme) + return nil, fmt.Errorf("invalid protocol scheme: %s", u.Scheme) } } @@ -246,7 +247,7 @@ func (c *Client) GetStat() ([]*Stat, error) { err = gocsv.UnmarshalCSV(csvReader, &statRes) if err != nil { - return nil, errors.Errorf("error parsing CSV: %s", err) + return nil, fmt.Errorf("error parsing CSV: %s", err) } return statRes, nil @@ -301,25 +302,25 @@ func (p *unixProto) run(cmd string) (*bytes.Buffer, error) { conn, err := net.Dial(p.Network, p.Address) if err != nil { - return response, errors.Wrapf(err, "error connecting to %s", p.Address) + return response, fmt.Errorf("error connecting to %s: %w", p.Address, err) } defer conn.Close() _, err = conn.Write([]byte(cmd + "\n")) if err != nil { - return response, errors.Wrap(err, "error writing to connection") + return response, fmt.Errorf("error writing to connection: %w", err) } recv, err := io.Copy(response, conn) if err != nil { - return response, errors.Wrap(err, "error reading response") + return response, fmt.Errorf("error reading response: %w", err) } if recv == 0 { return response, errors.New("got empty response from HAProxy") } if strings.HasPrefix(response.String(), "Unknown command") { - return response, errors.Errorf("unknown command: %s", cmd) + return response, fmt.Errorf("unknown command: %s", cmd) } return response, nil diff --git a/metricbeat/module/haproxy/info/data.go b/metricbeat/module/haproxy/info/data.go index b52c3fe9c48..d4cf9700269 100644 --- a/metricbeat/module/haproxy/info/data.go +++ b/metricbeat/module/haproxy/info/data.go @@ -18,7 +18,7 @@ package info import ( - "github.com/pkg/errors" + "fmt" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstrstr" @@ -174,7 +174,7 @@ func eventMapping(info *haproxy.Info, r mb.ReporterV2) (mb.Event, error) { // Convert this value to a float between 0.0 and 1.0 fval, err := strconv.ParseFloat(f.Interface().(string), 64) if err != nil { - return mb.Event{}, errors.Wrap(err, "error getting IdlePct") + return mb.Event{}, fmt.Errorf("error getting IdlePct: %w", err) } source[typeOfT.Field(i).Name] = strconv.FormatFloat(fval/float64(100), 'f', 2, 64) } else if typeOfT.Field(i).Name == "Memmax_MB" { @@ -182,7 +182,7 @@ func eventMapping(info *haproxy.Info, r mb.ReporterV2) (mb.Event, error) { val, err := strconv.Atoi(strings.TrimSpace(f.Interface().(string))) if err != nil { r.Error(err) - return mb.Event{}, errors.Wrap(err, "error getting Memmax_MB") + return mb.Event{}, fmt.Errorf("error getting Memmax_MB: %w", err) } source[typeOfT.Field(i).Name] = strconv.Itoa((val * 1024 * 1024)) } else { @@ -201,7 +201,7 @@ func eventMapping(info *haproxy.Info, r mb.ReporterV2) (mb.Event, error) { fields, err := schema.Apply(source) if err != nil { - return event, errors.Wrap(err, "error applying schema") + return event, fmt.Errorf("error applying schema: %w", err) } if processID, err := fields.GetValue("pid"); err == nil { event.RootFields.Put("process.pid", processID) diff --git a/metricbeat/module/haproxy/info/info.go b/metricbeat/module/haproxy/info/info.go index b875f05e718..027daf8fe73 100644 --- a/metricbeat/module/haproxy/info/info.go +++ b/metricbeat/module/haproxy/info/info.go @@ -18,7 +18,7 @@ package 
info import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -57,17 +57,17 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { hapc, err := haproxy.NewHaproxyClient(m.HostData().URI, m.BaseMetricSet) if err != nil { - return errors.Wrap(err, "failed creating haproxy client") + return fmt.Errorf("failed creating haproxy client: %w", err) } res, err := hapc.GetInfo() if err != nil { - return errors.Wrap(err, "failed fetching haproxy info") + return fmt.Errorf("failed fetching haproxy info: %w", err) } event, err := eventMapping(res, reporter) if err != nil { - return errors.Wrap(err, "error in mapping") + return fmt.Errorf("error in mapping: %w", err) } reporter.Event(event) return nil diff --git a/metricbeat/module/haproxy/stat/stat.go b/metricbeat/module/haproxy/stat/stat.go index ddfc391ae4b..22fa5104633 100644 --- a/metricbeat/module/haproxy/stat/stat.go +++ b/metricbeat/module/haproxy/stat/stat.go @@ -18,7 +18,7 @@ package stat import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/haproxy" @@ -55,12 +55,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { hapc, err := haproxy.NewHaproxyClient(m.HostData().URI, m.BaseMetricSet) if err != nil { - return errors.Wrap(err, "failed creating haproxy client") + return fmt.Errorf("failed creating haproxy client: %w", err) } res, err := hapc.GetStat() if err != nil { - return errors.Wrap(err, "failed fetching haproxy stat") + return fmt.Errorf("failed fetching haproxy stat: %w", err) } eventMapping(res, reporter) diff --git a/metricbeat/module/http/_meta/Dockerfile b/metricbeat/module/http/_meta/Dockerfile index 650c1c7c062..4bcd674a247 100644 --- a/metricbeat/module/http/_meta/Dockerfile +++ b/metricbeat/module/http/_meta/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.16.5 +FROM golang:1.20.7 COPY test/main.go main.go diff --git a/metricbeat/module/http/json/json.go b/metricbeat/module/http/json/json.go index 0f2e0f2ff1a..e312a950cae 100644 --- a/metricbeat/module/http/json/json.go +++ b/metricbeat/module/http/json/json.go @@ -19,10 +19,9 @@ package json import ( "encoding/json" + "fmt" "io/ioutil" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -144,7 +143,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { event := m.processBody(response, obj) if reported := reporter.Event(event); !reported { - m.Logger().Debug(errors.Errorf("error reporting event: %#v", event)) + m.Logger().Debug(fmt.Errorf("error reporting event: %#v", event)) return nil } } @@ -157,7 +156,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { event := m.processBody(response, jsonBody) if reported := reporter.Event(event); !reported { - m.Logger().Debug(errors.Errorf("error reporting event: %#v", event)) + m.Logger().Debug(fmt.Errorf("error reporting event: %#v", event)) return nil } } diff --git a/metricbeat/module/jolokia/jmx/config.go b/metricbeat/module/jolokia/jmx/config.go index 23d7b18a5e2..337f0960011 100644 --- a/metricbeat/module/jolokia/jmx/config.go +++ b/metricbeat/module/jolokia/jmx/config.go @@ -19,13 +19,12 @@ package jmx import ( "encoding/json" + "errors" "fmt" "regexp" "sort" "strings" - "github.com/pkg/errors" - 
"github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -395,7 +394,7 @@ func (pc *JolokiaHTTPGetFetcher) EventMapping(content []byte, mapping AttributeM // When we use GET, the response is a single Entry if err := json.Unmarshal(content, &singleEntry); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal jolokia JSON response '%v'", string(content)) + return nil, fmt.Errorf("failed to unmarshal jolokia JSON response '%v': %w", string(content), err) } return eventMapping([]Entry{singleEntry}, mapping) @@ -517,8 +516,7 @@ func (pc *JolokiaHTTPPostFetcher) EventMapping(content []byte, mapping Attribute // When we use POST, the response is an array of Entry objects if err := json.Unmarshal(content, &entries); err != nil { - - return nil, errors.Wrapf(err, "failed to unmarshal jolokia JSON response '%v'", string(content)) + return nil, fmt.Errorf("failed to unmarshal jolokia JSON response '%v': %w", string(content), err) } return eventMapping(entries, mapping) diff --git a/metricbeat/module/jolokia/jmx/data.go b/metricbeat/module/jolokia/jmx/data.go index a48901f1a1a..59498764cf9 100644 --- a/metricbeat/module/jolokia/jmx/data.go +++ b/metricbeat/module/jolokia/jmx/data.go @@ -18,10 +18,10 @@ package jmx import ( + "fmt" "strings" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/elastic-agent-libs/logp" @@ -168,7 +168,7 @@ func constructEvents(entryValues map[string]interface{}, v Entry, mbeanEvents ma // to be actually the matching mbean name values, ok := value.(map[string]interface{}) if !ok { - errs = append(errs, errors.Errorf("expected map of values for %s", v.Request.Mbean)) + errs = append(errs, fmt.Errorf("expected map of values for %s", v.Request.Mbean)) continue } @@ -207,7 +207,7 @@ func parseResponseEntry( // This shouldn't ever happen, if it does it is probably that some of our // assumptions when building the request and the mapping is wrong. 
logp.Debug("jolokia.jmx", "mapping: %+v", mapping) - return errors.Errorf("metric key '%v' for mbean '%s' not found in mapping", attributeName, requestMbeanName) + return fmt.Errorf("metric key '%v' for mbean '%s' not found in mapping", attributeName, requestMbeanName) } var key eventKey diff --git a/metricbeat/module/kafka/broker.go b/metricbeat/module/kafka/broker.go index 11bc0ac2c5c..9d504644730 100644 --- a/metricbeat/module/kafka/broker.go +++ b/metricbeat/module/kafka/broker.go @@ -26,8 +26,6 @@ import ( "strings" "time" - "github.com/pkg/errors" - "github.com/Shopify/sarama" "github.com/elastic/beats/v7/libbeat/common" @@ -115,7 +113,7 @@ func (b *Broker) Close() error { // Connect connects the broker to the configured host func (b *Broker) Connect() error { if err := b.broker.Open(b.cfg); err != nil { - return errors.Wrap(err, "broker.Open failed") + return fmt.Errorf("broker.Open failed: %w", err) } c, err := getClusterWideClient(b.Addr(), b.cfg) @@ -133,7 +131,7 @@ func (b *Broker) Connect() error { meta, err := queryMetadataWithRetry(b.broker, b.cfg, nil) if err != nil { closeBroker(b.broker) - return errors.Wrap(err, "failed to query metadata") + return fmt.Errorf("failed to query metadata: %w", err) } finder := brokerFinder{Net: &defaultNet{}} @@ -189,12 +187,12 @@ func (b *Broker) PartitionOffset( req.AddBlock(topic, partition, time, 1) resp, err := b.broker.GetAvailableOffsets(req) if err != nil { - return -1, errors.Wrap(err, "get available offsets failed") + return -1, fmt.Errorf("get available offsets failed: %w", err) } block := resp.GetBlock(topic, partition) if len(block.Offsets) == 0 { - return -1, errors.Wrap(block.Err, "block offsets is empty") + return -1, fmt.Errorf("block offsets is empty: %w", block.Err) } return block.Offsets[0], nil diff --git a/metricbeat/module/kafka/broker_test.go b/metricbeat/module/kafka/broker_test.go index b9d7262683b..70cbb2babd8 100644 --- a/metricbeat/module/kafka/broker_test.go +++ b/metricbeat/module/kafka/broker_test.go @@ -21,7 +21,8 @@ import ( "net" "testing" - "github.com/pkg/errors" + "errors" + "github.com/stretchr/testify/assert" ) diff --git a/metricbeat/module/kafka/consumergroup/consumergroup.go b/metricbeat/module/kafka/consumergroup/consumergroup.go index acce0b39d20..5fa41b13f01 100644 --- a/metricbeat/module/kafka/consumergroup/consumergroup.go +++ b/metricbeat/module/kafka/consumergroup/consumergroup.go @@ -20,8 +20,6 @@ package consumergroup import ( "fmt" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/kafka" "github.com/elastic/elastic-agent-libs/logp" @@ -81,7 +79,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { broker, err := m.Connect() if err != nil { - return errors.Wrap(err, "error in connect") + return fmt.Errorf("error in connect: %w", err) } defer broker.Close() @@ -114,7 +112,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { } err = fetchGroupInfo(emitEvent, broker, m.groups.pred(), m.topics.pred()) if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } return nil diff --git a/metricbeat/module/kafka/consumergroup/consumergroup_integration_test.go b/metricbeat/module/kafka/consumergroup/consumergroup_integration_test.go index db3423c8c3d..aafb5250499 100644 --- a/metricbeat/module/kafka/consumergroup/consumergroup_integration_test.go +++ b/metricbeat/module/kafka/consumergroup/consumergroup_integration_test.go 
@@ -20,12 +20,12 @@ package consumergroup import ( + "fmt" "io" "testing" "time" saramacluster "github.com/bsm/sarama-cluster" - "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/tests/compose" "github.com/elastic/beats/v7/metricbeat/mb" @@ -47,7 +47,7 @@ func TestData(t *testing.T) { c, err := startConsumer(t, service.HostForPort(9092), "metricbeat-test") if err != nil { - t.Fatal(errors.Wrap(err, "starting kafka consumer")) + t.Fatal(fmt.Errorf("starting kafka consumer: %w", err)) } defer c.Close() @@ -70,7 +70,7 @@ func TestFetch(t *testing.T) { c, err := startConsumer(t, service.HostForPort(9092), "metricbeat-test") if err != nil { - t.Fatal(errors.Wrap(err, "starting kafka consumer")) + t.Fatal(fmt.Errorf("starting kafka consumer: %w", err)) } defer c.Close() diff --git a/metricbeat/module/kafka/partition/partition.go b/metricbeat/module/kafka/partition/partition.go index c0372b32361..486c9a79e24 100644 --- a/metricbeat/module/kafka/partition/partition.go +++ b/metricbeat/module/kafka/partition/partition.go @@ -18,12 +18,11 @@ package partition import ( + "errors" "fmt" "github.com/Shopify/sarama" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" "github.com/elastic/beats/v7/metricbeat/module/kafka" @@ -78,13 +77,13 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { broker, err := m.Connect() if err != nil { - return errors.Wrap(err, "error in connect") + return fmt.Errorf("error in connect: %w", err) } defer broker.Close() topics, err := broker.GetTopicsMetadata(m.topics...) if err != nil { - return errors.Wrap(err, "error getting topic metadata") + return fmt.Errorf("error getting topic metadata: %w", err) } if len(topics) == 0 { debugf("no topic could be read, check ACLs") @@ -190,12 +189,12 @@ func queryOffsetRange( ) (int64, int64, bool, error) { oldest, err := b.PartitionOffset(replicaID, topic, partition, sarama.OffsetOldest) if err != nil { - return -1, -1, false, errors.Wrap(err, "failed to get oldest offset") + return -1, -1, false, fmt.Errorf("failed to get oldest offset: %w", err) } newest, err := b.PartitionOffset(replicaID, topic, partition, sarama.OffsetNewest) if err != nil { - return -1, -1, false, errors.Wrap(err, "failed to get newest offset") + return -1, -1, false, fmt.Errorf("failed to get newest offset: %w", err) } okOld := oldest != -1 diff --git a/metricbeat/module/kibana/settings/data.go b/metricbeat/module/kibana/settings/data.go index 55c6860e9f6..fc47e8a4839 100644 --- a/metricbeat/module/kibana/settings/data.go +++ b/metricbeat/module/kibana/settings/data.go @@ -19,8 +19,7 @@ package settings import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper/elastic" "github.com/elastic/elastic-agent-libs/mapstr" @@ -34,7 +33,7 @@ func eventMapping(r mb.ReporterV2, content []byte) error { var data map[string]interface{} err := json.Unmarshal(content, &data) if err != nil { - return errors.Wrap(err, "failure parsing Kibana API response") + return fmt.Errorf("failure parsing Kibana API response: %w", err) } schema := s.Schema{ diff --git a/metricbeat/module/kibana/status/data.go b/metricbeat/module/kibana/status/data.go index f02b2812d0b..3f975323a9e 100644 --- a/metricbeat/module/kibana/status/data.go +++ b/metricbeat/module/kibana/status/data.go @@ -19,8 +19,7 @@ package status import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" s 
"github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -60,7 +59,7 @@ func eventMapping(r mb.ReporterV2, content []byte, isXpack bool) error { var data map[string]interface{} err := json.Unmarshal(content, &data) if err != nil { - return errors.Wrap(err, "failure parsing Kibana Status API response") + return fmt.Errorf("failure parsing Kibana Status API response: %w", err) } dataFields, _ := schema.Apply(data) diff --git a/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt b/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt index 2f67d6aad45..57d386ba6a0 100644 --- a/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt +++ b/metricbeat/module/kubernetes/_meta/terraform/eks/requirements.txt @@ -5,7 +5,7 @@ docutils==0.15.2 jmespath==0.9.5 pyasn1==0.4.8 python-dateutil==2.8.1 -PyYAML==5.4.1 +PyYAML==5.3.1 rsa==4.7.2 s3transfer==0.3.3 six==1.14.0 diff --git a/metricbeat/module/kubernetes/fields.go b/metricbeat/module/kubernetes/fields.go index 32a78e4395c..3cbfdbde396 100644 --- a/metricbeat/module/kubernetes/fields.go +++ b/metricbeat/module/kubernetes/fields.go @@ -32,5 +32,5 @@ func init() { // AssetKubernetes returns asset data. // This is the base64 encoded zlib format compressed contents of module/kubernetes. func AssetKubernetes() string { - return "eJzsXU9z47aSv8+nQPmyzpbj2vPU1qtKPC8bb5KJ155JDltbGogEJcQkwACgPXqffgt/CEIkwD8iKGts6ZDK2Fb3rxsNoLvRaHwPHtHuPXis1ogRJBB/B4DAIkfvwcUv9ocX7wBIEU8YLgWm5D34xzsAAGj+ABRIMJzIbzOUI8jRe7CB7wDgSAhMNvw9+N8LzvOLK3CxFaK8+D/5uy1lYpVQkuHNe5DBnKN3AGQY5Sl/rxh8DwgsUAue/IhdKTkwWpXmJx548nNLMsoKKH8MIEkBF1BgLnDCAc1ASVMOCkjgBqVgvXP4XBsKNRpLsIYES8wRe0LM/saHqgdZS4E/3N0CTdDRZf3Z12n9cTXVhsfQ3xXi4pohTiuWoL0/qpE+ot0zZWnrdz145edeU0Yp8NJuA+DVekkMIfIdGAkt4wMAiiy4TPKKC8SuFFNewgRdWe1814vrCbF1PFg/f/p0Bzok2zwTmkZUheLZIdnlSQQiYiUZxR8Gg0GxAB0WbSwp261YReLB+BOJLWJAbFHNA1QccZCyHWgzaoN5xKTNbQaSXzBJ5cJmqA8MSVFSgoiIx/6mJgm2kKQ5JhtXKb1o2qvmTCRyOVUkQUbrkRmxTDwhxjGNaBqGoEXRFbMNQWlub1uZCaGeJD7CbeYFElsa0R7VxPQQ7QhNeUQztBK3qdZsS0YTxLmXo88QfTutSy8pq2uOks7va5oprdZ5e93rCHJz9xlwlFCStpE1nApUULaT2zpOERHX613jFHX55pRsPL/ULtF7EPryHqof5R8BTEDN02AYgviEmahgfkyEhuUQwCzl17RE5DqhVWf1G4S2x/pjVawRkyuuJAgynCP7B5SFh5ELyARKIxjNgzYYwDFJkFpijHHXPLwT4BmKZBvN/NETIoJfc/wvpIf7el0lj0hc/3tQOLr+CyU+3etfrMYPwZ9SFA0BSAQgxVwwvK6Uz49JwIbC2HlVLGquD1UhDea5wc0VcH4I2Jgm7CIaguBxW8DAot3h3F24gVq89T4NHo0vI23agRbYR3hJCfe7loeY9MvYclAjSrge81b+BYLJVgt7Vfsd6n/WTTBy5QZMVyZ8kXGxdQavx6jkSFOkHtXR82OhidHgELR2WbjXDpMcSx02LnYXRBBAkLnmByA35Ps8qXguTVT9eVTmMksrplI019VBtrXHtnb+aprS5AtpOgVOGB3yr1wk81XQxUJch0H/cByYI65Cdr3JoUAk2e0tOVdgi7mgGwYLoDGF8ScVY3I6zFfkLclyvNmKYVOS1FhFCCabyGtAPQ0TgZ+Q+jYwjPrXBCSS9FoPQpQFoUlSmqHlAArFxcseVikW12rrjMJe0fN5CfsMGZLQUBqRZ02yzbxZsoiAmMxLyTratfSiZGSVO74SuPA7KSkU7V8MuAYPkiDoEHRi0tGbwVBa5+4zqDjcII8iQmK7UNR3g/PQB6iP6p6QlPkIDxMfYuAy8azQbTbBWK3+jHDz6s+NNTup9xvKkFE+gSS4f+3hhYRKxYRgj4A8Eq42DJQOsLTAaIquS+8m1eDiCcxRuspyCkN/WPuSJWJJN395kAxSv5ADWNOU/zaxh6AC5go7gHlOEyjgOkfye73C5rjA4tuTNkUZJijV8G3aslkKL+VPghoBOAMVUd9F6XfX4DZrfV3+jfo1B5AhUGDO5QYqQxD5h18k0S/qn1+4gAKt9A/MuoPM19ZUbKVXItmmgBIgtlAoQFdAbHF9HAmecZ6DdcMGEYEZynfX3hUzp5vxKcEBff9KNzJeyejEpRI+QZxD/8Scv1yGgi8wblUYiuHAeDtU+rHCggSWMMFiNxzi1X/5FvSj59l43cil+C3oRW0549WC5cIQTkHP8z/8EQaIus1+UnbQzJagQE6Om6F+xygeLslqDKSAdS4BSRmIB9L+EUW0XMlbWbTbdjhwzrKc239qKtGKCAp84h7wbw76iU5wwALAyfvBY2Se4Qobgxjyhg2KU3OI3eFjnWNy8Mqm8P3DQ/8EtoemlD1isuEonFN8HRr5UwsKOBLjl7bTnOchUY4054O2VMINymCVezLZ087//aI3qVPJCAQ4WXcI/kXZ0RApbkFcdt2hVGQRK3TeRth4T6lQVSh8xwUqJkeQb8WT9evJjbDOobZfRya0ermQ+yhh5GdPAOkeMzGa54j
pmwOzjptuLDFzDyHOYdMxy8qPWU5+7PrUuHWpiluwKFX+Nx6vj7BA42qf/0VJRL63JGOQC1YlomKoS/y0S3Bt+ojp20J65bu5+6zOWQEvERFy9TuX6c5A+O2U6UoIBfwaAcGvNp54sUJhWy5Qxzq2UFiVDlcEfwWopMk2ZOD7RW7RZm5fsdw0JTfDrFZaW7IjqHO97gqsGX1EBKT0Wbox6kpgxdWec2X2AjX5PWu/5y5RxMo1W/VoYNflV63atY4Acmc+DPECBVK2aM0pcBsYgMPxH7UorjUq7TrcTlncTDmPWG5rZVMFxqZcUn0zngxL1OLFQ/cShuSt5o5nRbqGc7khMCXREfEtb+ZuOXkcO+9K8QKFugvYkpvz/rtCVbybFVIwJAMEXaM737H5mT7L2HlX+yxgC7nybwwnW6Vr/B3KwBohUv+4oxQrsi8ic/IQJMME820U56wjA80UDiVLSgnSBzpY+c8loxsmHTc1cpCTfxNaooQS6fsznaHpVcGhUsM0jbGQ/Gm5wTTVVRSHIkpRKbZRIZmydU35UFgMCYajLLoNMOIsv4r6FHB7R9AJzXOUCOq/ZnzYHUCcKLcs5i6jzlhqyl1pPZkLd35uEczFdhcVkaWqsE2EFFs1E9lr5IEjuvEY7vYOzSaqw+4CCKaIXWO+KiAXgevua0pzBNu38YeaMWybbgxJN7uKCRdQxruYGxCWwrs2yPZ9w8nZ3U9b5PZ4MZc97dk9MNPQ/kYt8pAhsEFEBk+6K019y8Ms4nscsFrx5UD80u6RAyZkjsMWGrCL3kG4kVQ0F8BQQlmqN+Rm/RK4QPpnJWQCJ1UOmbnDK3c8mqg1OPUgVN8UsCg9KLvrVl+OPMOMi5VhRQLtaaZfy/hUA5RyKh6g4SF/Fr7FmsPFAUkWA3iaBCHvHPprDAJ9FeOt4TdNx1gCSpteGPgJEY86ElruVoL6EDS7K+StNh3hnHUvuntFaSw4a4XtHjMHcv+0K206rp+jJ4EfMvp+jmpZrNu0MFRSJnSfFsw9Y9E3gRZtIJMxWoDnLU62Sjl6bcC8WRm9kOKe13yU7ockDCgZi8U5qYIpFHD+iP1mKAHIOU2w2hWesdj2zqG+cfMvodOdP2sHDHUGBPQtWCOOYfcWLcVApRT7ZkoDqB6XVdwjtf8yZI1JZI0x+B3t+Od5o3iq/mNxGSuSMqbUk0BPgGc4NBvrY8dV9LZKf5i2Sq5C+k85Kxzx2PgzwX9XCKhzNpxh6W1SB4gnw2OXcZRnqxyTx4hg7n+V6zhDXKIxLbdC2wgmTzR/QunKg3Gp1anm6dNL3zoFSxzfcn64u7VNuYz19AxX3O5skrfb1aSHcdzFw12wepguN19ryhNUH3fCfr79MMDbzY/MifmcC+QqfXG+O36+Ox74xL87rjzWb/3a+PmWlv9vzre0Op94t7TO9106kM/3XULQz7c3Bm5vECSk9URbu9nXV26C9yhB+Enl+1VjJ1LfRpI6BpgIxDKYIHWHsPNTgKWvKerLS1f2mp9O78mNlgrp7zCUCPAE8wqBL//xpVc1iDFfXeJo3YyV+6vh9EIi25zYazewTwwSXmAh3p6NfXpBG7OHTeerbPVn5Kj9dL7FNllF5wts7qejnrdxd80pVgm0QWnDOk4XmwbXqfSvaRCFethY/7MiwezbIWs4LqS/vlBPovD+MMxgiAkYOdPB+PTWmBkPpqXBbgsVn0zfQcDIXQS8bUWO2GfAlGXvTSrRvxvZFMPejdQ5pxAlTb/JQ4hzDqH+HJRDOIXIy0b357hbnNK4fPKOy5s6/TuZ064OsFNsETelSfKbaowsN1fbHYq320OZjsiUIEAZKChD7h/X17Bpqmq+W53ihr+EefOd0+igHPnE9nw02QF+kqvDuYFkvCVjVBfJb2ltAG8wMbs3qcNCtw7aV6//pF0r5rlz3v4NLgBxOkvOWwzCse6rr2LRlmSbHclRUJfgB6qdSrhBqwULJTSs0WUbq+PgCRdtOO24vu7mJJyca46KVszH7M8d5iZxCzZzOGZXwHN/NxC1ecm5v1sP63N/t3N/t3N/t3N/t+Zz7u927u927u82ZwjO/d3eQn83viNtx+Pgw77Hao107GciwB1JDjz3Y1WOeNzNd0eSOwnrXpK2q/yUt8G7oGLOuADAEc+Ed3Ed0UYDsAdtNSiNKftQTe5gUWCyWcIJM1yAwybkjx2KNGq3rx64IyxkAOkRzeVjjyDjbcauXskWpVU+7wkGJ3Nl6X1zjzAcM430mtNx4V6nB7F70AYlrbxL2Rb9dNq5xOIZbhRTMprhTlIhDlcfbcejqvKIwv4gBCpKYejKQLSew60KykX78ox6Z+M4LQ+bJayn0yE4J2q92M6JWh/Ac6L2nKidpuRzovacqN0X4ZyoPSdqx6A7J2rPidpzotYj4fkhjpAc54c4AhKfH+IoTvkhDm4zJtGmdIlIKudySaMYR7PL1CNhGADJoCOztHMts2i3R98DyRAqFIdr9bJHcdwEdNNby+IABseMUwuPUHOjkLsuPh/JHgQxLaCrrTFIoE7PxYBiKmsbGDXteolN8ooLxACnIIPt7JqzcNaQXurArEmUGigzIhmTalUbkDe1OiB3pEiZh2UaipnnShDTxrtiTMLbnI+Yev53bTiHXeC39KKcSh3cVrmjxxuLC/sz7TqhE29320IevjnkF6AtRN8NUCuOYgQurV/6DLFQ/yMQKzCB/VcvEEzDvbn8+f2RKBuEiolfv3tBkYCs52IBJgJtOgcRB4DRfAJ9hBzFtN9sccHMGr9P6vBFveRSv/tkLJPXvkuu1iIzlODSwr9Rj0vI0b1hkG9/pbT8ESaPNMuuwD8ZU8077qo8v/Iytr823/kOUOaYieRTlDkSKL1qNHYDCaHiviKKg4wBfv/9t19wnqP0OzWoKHyFTr0R1DBYLa1V9WCQV7XPUG+9jbAK+jiRpZoaof23iaf0IRgMdtUdstDte003dHVs0mS4ufusOotzzXJEsv0okAw7lIKj9dvWKl/+OGtIfHNxTt+7G+z+Vo/Ly+Nuhqy++hfqFZQwSv6i61juhqYWxdmYc9Z+Y3AMBdCzGXjpOM6ceSzNPzPG8GlIgJLmuEXJureJwE/I69AGjS7gyGpSKkq3tQldI3F8Qr7iFZfBfadR1+iyhL13F41msQxZfXQby1VPR3kSdz1Juz22/1QHpXW2Tp+guhDkLuV7oGpvP60tIBoOqXS1c9YwWEX8EwR9XYi9pDzIPkUwzTEJcx6yuQ+GgGUNMxWD10U4EkmdDZV+VwZx7ozEmP/p/2f3f6xsEBWU7N+pn1Mc+EHRe1DXvY+8MjabU5njBI4PqQY2HK90hsmBldvDLR7GxORhf7RJvTZNCGu1gBKxRpAgxBRxzHq6384DaKjv5YgnwesPIGNpT0eQE6FV5HjD6/AagumcQOR0V8x8tNdxhRqCUeZ8CT2dMGfU/f3iRaq5+JIDR1hGHBx20DDJ6MRVZGiKzspcfGgwNuZWT1uL+pKXKAmneYYnQiyM3Wkwa37GguWbn2FgZep9IjQ6KM
2nC6gGMiZM8k/scU6CP3BaKkTyPPa83X+eVDl/fVXJMkaItQ7cNZ1mbdjRDjlGeBAqeFlgf/HGR0EYLU91CRiaRT8MXiUJQt24LC4SxYXzrMq7aGokkxqgj98xpIXaF4mnepuhp4dt4OR7fHi0M6JqdKFwnn7uiyj3cNUp0KMi83NtYfJcFIs1jA0HwJEQmGymjueyrnlCSYY3FVNZUAtVJVzc9QpcPnS2/saFYzDPUY55++A0lhIdDievRRers+H06I8+k8DNl/maU7SVt8kKdT48UW3eMqJoBynurmyfw1XFZPSZcN1YsbtZNug8TxFHRFc/S9yDDFyizTW4uGGU/DddX4RdY8xXCSWC0Tz3unQRIP/+XB/uWUbg8kKwCl1cgYsM5lz+D2Xg4j8JJegfF35rnHhAPc0czXWGw+2xXs6XUaGbr93bPAKKrMgjoc+kZ9wHPKaoaI3rNBaqneKn+bhzzCqJ/vTRrEFQrwu7pQfgUqr/CijlS9UbzYdtpCImNdwbo86qklAo9/gMFUro87VVyRDnlfdR6FjK012D7wyjg7WYYv54DLgfMH+cDZZWYkWzlcS8INTfK/F7JvEejLPE6TF0enf7YbZKTT+H1Zh8z3zEpmfDZyfhMwX4EuUcTmPc5SoopF/kduBtSjtoBqDeAJSbBBlysnQyklAnr8ECb0cn5kWqASFmhPj1MwT27avjvbfujNFLVVO0h9BUUjTjV9dU2GsdM8fxpQT9qL0RJV3/M2dO4m+RoVbtq5eyZHcse9NmdkAWhaO6d7ef22suYzCOuUBEPNG8KmL5nQ1ZoOk2KVdGC/WX36tCvO9fuoDnDw1PkggU+PXNmnH1B4aH983DvrrhqULokmCYJJSpCzCCOmMScOspgxu0SnIY6K4xgvuDJgIUEZuj6tgTGFMgEbLLJIe4WMw4kxyesIne/XHTY59ahNUcBj9ikqK0VkaYlSn2WxmrmTEj7ptKz3p6xZ8VUm+KgJ82VKcJq2JGo6gfFAkgSfh5LDi/7v64aRdgd2fRyb3PuKVcrHB76zbnq4ensiQ8SRrc3sVIX0xjbOKMw0qRFrwv0oJpLozc1xdG7sw9zOvrQ++JxEQ3L2NTl/ItlNhrD3jNzYf3qou2XdIyr9rPmZV1FRyPVO+3YM2NCzVc/HNKpXt79Vz3+h8vV7F3OK4XK9UbgY2uVZefpZS2QQTV/YUMp/oOeAMO9JwDNQXYa7RMocW+FrMqz3c1t0FtOnfo1HnY3xUVMNrS4tCM80DOYmX09wbr/yisQ8X0bS1NQaA56NMylILLLWSp2qA4Sr8b12FxjqO+L2jwzomns8FoFq6EeubIr16BL1LUL1LWL1LYL4H9wyP4AfLpc2jdqkHCgWWZY8SBoN2Isf+f4QhTLgc4iZXwMNRe/D7Sg8HRk8/QHQhCTvgIHrdEIEZgDm7vrMkb+f0s0Vf9hVlBai1ZTQx8+PgQngKW5eFidhgGYoucwnS1hjkkySy1/kphCn40dKxBBZjOmeK1YB0atjaCqI5Bs0xE9xwKoK8ZyJBtjk3UbH720ekvvwsWuHlVpWjIxXDvC250ibIqj+fY1xSjefZ9ShhK1vgLeK1KbF0kuFQNQfU++GAkaHt/Rwg19pRnfaiDoo2F/dPG8bPu6Z7PF1IieIGwo3P3YCzAF4s/hkywiQ6WNkInDjm80mphW7QW6IA9DRusLW8EsFbetZ12nbcku1nYF/fz9tCEvb2S0SfMMQ0VtE44XGooNV6fiyJ0ZqCOblaeO+WTAgNFxdxMV/zTHYEFTqAMmM3uZk4w/Edd5pxkjVXWc1ba/zea6pP3FKmXahvdqK5MJAWGS3x/ZG/YB7wS1Y4+lvXr3vbOK9Bx3sztNqGaNBLdRaNpTRF0CFUjoFXwpsjQGHielbDN0v1STqgpGpLu7rN5wPuw0wb13eAyPefR7YR6C9KGiQ8xcJl4Xg9qs+l92huMb7QFjLZvKENG5QSS4HNLeyghoaFSqZFAx3YDU+aA0gDLhWqmljOiU3u53X0yP+zfvvpn7O8fHsap4pmyR0w23OMqvi6N/KkFNeH5CM2UcIMWfEHfaYEqV6oAJ7sewL8oOxoixc2La981i+Sp7NebvZiD3tWKU2nm5ZZFfN7IV2k2RLSPMDjJWfgTzpFxTFUF5UBRKZh0jPxKVdSUDg/qyNMHBLx29ShvKqgZV4jS22m5EYEnMEfpKlTa7wpSIpZ030iaKMqdJiKXWZoBU2ChosXgAGNC055C8DlDHLAdENX9/awHKyCEs7Iy1B+bxADzE0NoDJhQL+i4aLT/OgLON2DGRor/DwAA///rmoGd" + return 
"eJzsfV1z47aS9v38CpRvXuctR7XXU1unKvGcbLxJJlp7JrnY2tJAJCQhJgEGAO3Rqf3xW/ggCJEAP0RQ1tjSRSpjW90PGo1Gd6PR+B48ov178FiuESNIIP4OAIFFht6Dq1/sD6/eAZAinjBcCEzJe/CPdwAAUP8ByJFgOJHfZihDkKP3YAvfAcCREJhs+Xvw31ecZ1c34GonRHH1P/J3O8rEKqFkg7fvwQZmHL0DYINRlvL3isH3gMAcNeDJj9gXkgOjZWF+4oEnP3dkQ1kO5Y8BJCngAgrMBU44oBtQ0JSDHBK4RSlY7x0+C0OhQmMJVpBggTliT4jZ3/hQdSBrCPCH5R3QBB1ZVp9DmVYfV1JNeAz9XSIuFgxxWrIEHfxRhfQR7Z8pSxu/68ArP/eaMkqBl3YTAC/Xc2IIkW/BSGgRHwBQZMF1kpVcIHajmPICJujGSue7TlxPiK3jwfr506claJFs8kxoGlEUimeLZJsnEYiIlWQUfxoMBsUCtFg0saRsv2IliQfjTyR2iAGxQxUPUHLEQcr2oMmoCeYRkya3CUh+wSSVhs1Q75mSvKAEERGP/W1FEuwgSTNMtq5QOtE0reZEJNKcKpJgQ6uZGWAmnhDjmEZUDUPQomgPswlBSe5gW5kIoVokPsJN5jkSOxpRH9XC9BBtDZryiGpoR9ykWrEtGE0Q516OPkX07bQuvaQoFxwlrd9XNFNarrOm3WsN5Hb5GXCUUJI2kdWccpRTtpfbOk4REYv1vnaK2nwzSraeX2qX6D0IffkA1Y/yjwAmoOJpMPRBfMJMlDA7JULDsg/gJuULWiCySGjZsn690A5YfyzzNWLS4kqCYIMzZP+AsvA0cgGZQGkEpXnQCgM4JglSJsYod8XDuwCeoUh20dQfPSEi+ILjfyE93Yt1mTwisfj/wcHR9V8o8cle/2I1fAr+lEPREIBEAFLMBcPrUvn8mAR0KIydl/ms6vpQ5lJhnmvcXAHnx4CNqcIuoj4IHrcF9BjtFue24QbKeOt9GjwaX0bqtAMtsI/wghLudy2PUemX0eWgRNTgOtRb+RcIJjs92JvK71D/s66DkRs3YLox4YuMi60zuBgikhMtkWpWB6+PmRZGjUPQymXhXj1MMixlWLvYbRBBAEHmmh+A3JDv8qTiuTRR5ecRmcssLZlK0SzKo3TrgG3l/FU0pcrnUnVynDDa51+5SKaLoI2FuA6D/uEwMCe0QtbeZFAgkuwPTM4N2GEu6JbBHGhMYfxJyZhcDtMFeUc2Gd7uRL8qSWqsJASTbWQbUC3DROAnpL4NDKNum4BEki70JEQxCHWS0kwtB1AoLl72sEyxWKitMwp7Rc/nJRwyZEhCQ2lEnhXJJvPaZBEBMZmWknWka+lFycgqd3wlcO53UlIomr/ocQ0eJEHQIujEpIM3g760zvIzKDncIo8gQsN2oajvBtehD1AX1YNBUuYj3E+8j4HLxGOhm2yCsVr1GeDmVZ9bq3ZS7reUISN8Aklw/zrACwmVggnBHgB5IFytGCjtYWmB0RQtCu8mVePiCcxQutpkFIb+sPIlC8SSdv7yqDFI+UIOYEVT/tvEHoIKmCnsAGYZTaCA6wzJ73UONsM5Ft/eaFO0wQSlGr5NW9am8Fr+JCgRgDegJOq7KP1uAe42ja/Lv1G/5gAyBHLMudxAZQgi//CLJPpF/fMLF1Cglf6BsTvIfG1NxU56JZJtCigBYgeFAnQDxA5Xx5HgGWcZWNdsEBGYoWy/8FrMjG6HpwR75P0r3cp4ZUNHmkr4BHEG/QtzurkMBV9gmFXoi+HAcD1U8rGDBQksYILFvj/Eq/7yLchHr7PhspGm+C3IRW05w8WCpWEIp6Cn+R/+CANE3WY/KT2oV0twQE6Om6FuxygeLslqCKSAds4BSSmIB9LhEUW0XMlbMdpNPew5Z5nP7T83kWhBBAd85h7wbw76kU5wQAPA2fvBQ8Y8wRU2CtHnDRsU5+YQu9PHWsfk4JUt4fuHh+4FbA9NKXvEZMtROKf4OiTypx4o4EgMN23nuc5DQznRmg/qUgG3aAPLzJPJHnf+7x96nTqVjECAk3WH4F+UnQyR4hbEZe0OpWITsULnbYSN95QKVYXC91ygfHQE+VY8Wb+c3AjrEmr7ZWRCq5cLuU8SRn72BJDuMROjWYaYvjkw6bjp1hIz9xDiHDadsqz8lOXkp65PjVuXqrgFi1Llf+Px+ghzNKz2+V+UROR7RzYMcsHKRJQMtYmfdwmuTR8xfVtIW77b5Wd1zgp4gYiQ1u9SpjsB4bdTpish5PBrBAS/2njixQqFbblAFevYQmFVOlwS/BWggia7kIIfFrlFW7ldxXLjhFxPs7K0tmRHUOd63Q1YM/qICEjps3Rj1JXAkqs958bsBWrxe2y/5y5RxMo1W/VoYFflV43atdYA5M58HOIZCqRs0ZpT4NYzAcfjP2lRXGNWmnW4rbK4ieM8YbmtHZsqMDblkuqb8cYwRy1ePHQvoUjeau54WqRrOOebAlMSHRHf/GrulpPH0fP2KF6gUHcGXXJz3n+XqIx3s0IODMkAQdfoTndsfqbPMnbeVz4L2EGu/BvDyVbpGn+HMrBGiFQ/bgnFDtkXkTl5CLLBBPNdFOesNQa6UTjUWFJKkD7Qwcp/LhjdMum4qZmDnPw/oUeUUCJ9f6YzNJ0iOHbUME1jGJI/LTeYprqK4lhEKSrELiokU7auKR8LiyHBcBSjWwMjjvlV1MeAOziCTmiWoURQ/zXj4+4A4kS5ZTF3GXXGUlFuj9aTuXDX5w7BTOz2URFZqgrbSEixRTOSvUYeOKIbjmF5cGg2Uhx2F0AwRWyB+SqHXASuu68pzRBs3sbva8awq7sxJO3sKiZcQBnvYm5AWArvmiCb9w1HZ3c/7ZDb48Vc9rRn98AsQ/sbZeQhQ2CLiAyedFea6paHMeIHHLCy+HIifmn2yAEjMsdhDQ3oReck3EoqmgtgKKEs1Rtybb8EzpH+WQGZwEmZQWbu8ModjybKBqcehOqbAuaFB2XbbnXlyDeYcbEyrEigPc34axmfKoBynIoHqHnIn4VvsWZwdkCSRQ+eOkHIW4f+GoNAX8VwbfhN0zGagNK6FwZ+QsQjjoQW+5WgPgT17gp5o01HOGfdie5eURoKzmphs8fMkdw/7Qubjuvm6Engh5S+m6Myi1WbFoYKyoTu04K5Zy66FtCsDWQ2jObgeYeTnRKOtg2Y15bRCynuec1H6X5IwoCSoVickyqYQgGnz9hvhhKAnNMEq13hGYtd5xrqmje/CR3v/Fk9YKg1IaDLYA04hj0wWoqBSil2rZQaUDUvq7hHav9hyBqV2NTK4He045/nDeKp+o/FZaxIyphSLwK9AJ5h32qsjh1X0dsq/WHaKrkC6T7lLHHEY+PPBP9dIqDO2fAGS2+TOkA8GR5rxlG2WWWYPEYEc/+rtOMMcYnGtNwKbSOYPNHsCaUrD8a5rFPF0yeXLjsFCxxfc35Y3tmmXEZ7OqYrbnc2ydvtatLBOK7xcA1WB9P51mtFeYTo4y7Yz3cfeni7+Z
EpMZ9zgVylLy53xy93xwOf+HfHlcf6rV8bv9zS8v/N5ZZW6xPvltblvksL8uW+Swj65fZGz+0NgoTUnmi2m3195Sp4jxKEn1S+XzV2ItVtJCljgIlAbAMTpO4Qtn4KsPQ1RXV56cZe89PpPbnRUiH9HYYSAZ5gViLw5d++dIoGMearSxwsm6Hj/mo4vdCQbU7stSvYJwYJz7EQb0/HPr2gjtnDpstVtuozcNZ+utxiGy2iywU299MSz9u4u+YUqwTaoDRhnaaLTY3rXPrX1IhCPWys/1mSYPbtGBuOc+mvz9STKLw/9DPoYwIGrnQwPL01ZMWDcWmwu1zFJ+N3EDBwFwFvW5AD9hkwxuy9SSH6dyObYji4kTrlFKKg6Td5CHHJIVSfo3II5xB52ej+EneLc5qXT955eVOnf2dz2tUCdo4t4sY0SX5TjZHl5mq7Q/FmeyjTEZkSBCgDOWXI/ePqGjZNVc13o1Nc/5cwr79zHh2UI5/YXo4mW8DP0jpcGkjGMxmDukh+S7YBvMHE7MGiDg+6cdC+ev0n7Vowz63z9m/QAMTpLDnNGIRj3VdfxaI1yTY7krOgLsH3VDsVcItWMxZKaFiDyzZWp8ETLtpw2nF93U9JODnXHBWtmI/ZXzrMjeIWbOZwyq6Al/5uIGrzkkt/tw7Wl/5ul/5ul/5ul/5u9efS3+3S3+3S323KFFz6u72F/m58T5qOx9GHfY/lGunYz0SAe5Icee7HygzxuJvvniRLCetekrZWfszb4G1QMVdcAOCAZ8LbuE6oowHYvboaHI0p+1BN7mCeY7KdwwkzXIDDJuSPHYs0arevDrgDNKQH6QnV5WPHQIbrjLVeyQ6lZTbtCQYnc2XpfXOPMJwyjfSa03HhXqdHsXvQCiW1vE3ZFv202rnE4hluFFMwusGtpEIcrj7ajkdVZhEH+4MQKC+EoSsD0WoNNyooZ+3LM+idjdO0PKxNWEenQ3BJ1HqxXRK1PoCXRO0lUTtOyJdE7SVReziES6L2kqgdgu6SqL0kai+JWs8ILw9xhMZxeYgjMOLLQxz5OT/EwW3GJNqSLhBJ5VouaBTlqHeZaiYMAyAZtMYs9VyPWTTbox+AZAjlisNCveyRnzYBXffWsjiAwTHh1MIzqKlRyLKNz0eyA0FMDWhLawgSqNNzMaCYytoaRkW7MrFJVnKBGOAUbGAzu+YYzgrSSx2Y1YlSA2VCJGNSrWoD8qZWe8YdKVLm4TH1xcxTRxBTx9vDGIW3Ph8x9fzvmnCOu8Bv6UU5lTq6rXJLjrcWF/Zn2nVCJ97utoM8fHPIP4DmILpugNrhKEbg2vqlzxAL9T8CsRwT2H31AsE03JvLn98fiLJGqJj45XsQFAnIOi4WYCLQtnUQcQQYzSfQR8gRTPPNFhfMpPn7pA5f1Esu1btPRjN55btkyhaZqQTXFv6telxCzu4tg3z3K6XFjzB5pJvNDfgnY6p5x7LMshsvY/tr853vAGWOmkg+eZEhgdKbWmK3kBAq7kuiOMgY4Pfff/sFZxlKv1OTisJX6NQbQTWD1dxSVQ8GeUX7DPXWWw9WQR82ZCmmetD+28Rj+hD0BrvqDlno9r2mG7o6Nmox3C4/q87iXLMckGw/CSTDDqXgZP22tcjnP87qG765OKfv3fV2f6vm5eVx11NWXf0L9QpKGCV/0XUsd0NTi+JsTDlrvzU4+gLoyQy8dBxnzjyW5l8ZQ/jUJEBBM9ygZN3bROAn5HVog0oXcGQ1KRWl29qEtpI4PiFf8ZLL4L7VqGtwWcLBu4tGsliGrD66teaqp6M8ibuOpN0B23+qg9IqW6dPUF0IcpfyPVB1sJ9WGhANhxS62jkrGKwk/gWCvs7EXlLuZZ8imGaYhDn36dwHQ8CyhhsVg1dFOBJJlQ2VftcG4syZiSH/0/3P9v/YsUGUU3J4p35KceAHRe9BXfc+sWWsN6ciwwkcHlL1bDje0RkmR1Zu97d4GBKTh/3ROvVaNyGsxAIKxOqBBCGmiGPW0f12GkBD/SBHPApedwAZS3o6ghwJrSSnm16HVx9M5wQio/t84qO9jitUE4yy5gvo6YQ5oe7vFy9SzcWXHJgpL9OvEwOj0P/1qsWHemg/WI24pSTFKk9qqoauBSvRDdjAjKveOCV5JPSZhPM01eFd+/xlNG4H4VJTlVvdcRBPYOodXbELC5MNHTntfWZ0UnbJEWltEirTalFf8wIl4SnuV8xYGNumapINjQXLZ0PDwIrU+4xrdFCaTxtQBWRIKOs3vsMcOX9wO1cY63mQe3f4hKxy0Lsqx2UcF8sOLOtuwDY0bIaFA0y+CjBn8AG8MWwQRiOamAOGZtENg5dJglA7do6LRHHhfFNmbTQVklFN6ofvGFJD7avRYyOC0PPQNrj1PRA92GFUddRQOM9zd0X9B7iqNPVJkfm5NjB5LvPFmsaaA+BICEy2Y+dz3vApoWSDtyVTmWoLVSXFXHsFrh9aW3/tZjOYZSjDvHm4HUuIDoezl6KL1dlwOuRHn0ngdtJ0ySnayttkuTrDHyk2b6lXtMMud1e2Txargj/6TLhuftneLGt0nueiI6Krno7uQAau0XYBrm4ZJf9J11dh1xjzVUKJYDTLvC5dBMi/P1cHsJYRuL6SMdDVDbhSUdDVjYyDrv6dUIL+ceXXxpHB6jh1NJHZ8fpYmfN5ROjm1A82j4AgTUTZMe89HlNUtMZ1GgrVLvHzfIA7ZsakO8U3aRLUC9Buecj4zEhJTPq+M0adVMmiUB7w6Stm0Wegq4Ihzkvvw92xhKc7Oy8No6OlmGL+eAq4HzB/nAyWlmJFNyuJeUaov5fi943Ee3zSDqenkOny7sNkkZqeG6sh+Z7piE1fjc9OwueYrGPMkhunefF8VS7SL3K7JNflN3QDoN4AlJsEGXKydDKSUKfjwSJ8Rybm1bCeQUwI8aunIuz7ZKd7E9+Zo5eqeGlOoal2qeevqnuxV28mzuNLDfSj9kbU6LqfonMSf7NMtWoxPpcmu3PZmTazEzIrHNVhvfkkYn1hhnHMBSLiiWZlHsvvrMkCTbdOuTKaq7/8XhVLfv/SRVZ/aHiSRKAIs2vVDKsRMTy871J21XaPHYQu24ZJQpm6pCSoMycBt54yuEWrJIOBDigDuD9oIkARsTmqlj6BIUUsIb1MMojz2ZQzyeAZq+jyj9sO/dRDWE1h8CMmKUorYYRZmYLMldGaCSvivq7GrZZX/FUh5aYI+GlDdZqwyic08/pBkQCShJ/HjOtr+cdts0i+vYrO7g3NHeVihZtbtzlfPT6VJeFJ0uBuGSN9MY6xiTOOKxeb8U5PA6a51HNfXepZmruyi8Wxd3liopuWsanKLWdK7DUnvOLmw3vTRtssaZlWkemsyqpSkUeqyZyx5saFGi7QOqfyyoOau3v9j5erqjwe14uVUw7ARteqE9NcQtsigqoeUIZTdU+/Bgc6zoHqIvk1mqfQ4lCKmzLL9hW3Xmk69xzVedjfJRUwmmlxaMZ5xGi2qw73But/Kax9Fx6aUhqDQHPQp2UoBdc7yFK1QXGUfjesC
+YUR/1woMF7QZ7uE4NZuCPUK0d+9QZ8kUP9Isf6RQ72S2D/8Az8iPHpc2jdTkPCgUWRYcSBoO2Isfuf4QhTmgOcxEp4GGovfmfsweDoyGfoLhEhJ3wAjzsiECMwA3dLq/Jm/H6W6Kv+wqQgtRpZRQx8+PgQXgKW5fHDbDEMxBYZhelqDTNIkkli/ZXCFPxo6FiFCjCdssSrgbVo2NoIogrDJ6mI7gsVQF8xkCHbFJ2o2Pzso9NdfhcscPOKStGQxvDgC250iTZlFs+xryhG8+y7hNCXrPEX8FqR2LpIcK2atup98MGMoOn9nSDUOBCe9aGOijZm9k9rx8+6pwc+X0iI4AXCjtbdg6EAXyz+6FPBOjqYWwmdOOT4SquZddFqoAP2PHSw0rwBwBp512badZpJdrOwL+7nHaAJe3sFo0+YYxoqaB1xuFRTqr0+F0XozEAd3aw89/5HBQaKiukeoPinewJznEAZMJvdzZxg+I+6zDnJGqus56S0/2801SfvKVKvCdeyUZ2zSAoMl/j+yMG093gl6smAWNqv3x9wXuqO865xu1HYqJloG426fUjQIVTNmlbBmyJ9c+B5+sM2tPePckRNUd/olp/NI+vHnTao7wbN9JSH0RPqLUjrJ97HwGXieeGpyabz+XUwvBkaMNK+pQwZkRNIgk9iHaCEhIZKpQYCHQhSqwNKAyxnqpmaT4nO7XV9U6vkH7Dd4Vonu+CVCeP+4WGYKJ4pe8Rkyz2u4uuSyJ96oCY8HyCZAm61rxDWlWH9ufsuSUpGIMDJ2gP4F2UnQ6S4eXEdumaRPJXDerMXc9DbUnEqzbzcNhGfoPJVmvUR7SIMznIV/oQzZBxTVUHZU1QKRh0jv1IR1aXDvTLy9GoBr108ypsKSsYdROHthl0PgScwQ+kqVNrvDqRALGm/YzVyKEtNRJpZugGmwEJFi8EJxoSmHYXgU6Y4oDsgqvv7WU9WYBCOZWWoOzaJAeYnhtAQMKF+3XHRaP91AJxvQI3NKP4vAAD//w1E8rA=" } diff --git a/metricbeat/module/kubernetes/state_deployment/_meta/data.json b/metricbeat/module/kubernetes/state_deployment/_meta/data.json index 40d0ae89c35..56ac481dc35 100644 --- a/metricbeat/module/kubernetes/state_deployment/_meta/data.json +++ b/metricbeat/module/kubernetes/state_deployment/_meta/data.json @@ -14,6 +14,10 @@ "desired": 1, "unavailable": 0, "updated": 1 + }, + "status": { + "available": "true", + "progressing": "true" + } }, "namespace": "kube-system" diff --git a/metricbeat/module/kubernetes/state_deployment/_meta/fields.yml b/metricbeat/module/kubernetes/state_deployment/_meta/fields.yml index cc89c601abe..98f341b0bfc 100644 --- a/metricbeat/module/kubernetes/state_deployment/_meta/fields.yml +++ b/metricbeat/module/kubernetes/state_deployment/_meta/fields.yml @@ -8,6 +8,16 @@ type: boolean description: > Kubernetes deployment paused status + - name: status + type: group + fields: + - name: available + type: keyword + description: | + Deployment Available Condition status (true, false or unknown) + - name: progressing + type: keyword + description: Deployment Progressing Condition status (true, false or unknown) - name: replicas type: group description: > diff --git a/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.4.2.plain.expected b/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.4.2.plain.expected index 8d8cfcb2927..dc5ae410bda 100644 --- a/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.4.2.plain.expected +++ b/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.4.2.plain.expected @@ -7,6 +7,10 @@ "MetricSetFields": { "name": "kube-state-metrics", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -33,6 +37,10 @@ "MetricSetFields": { "name": "local-path-provisioner", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -59,6 +67,10 @@ "MetricSetFields": { "name": "coredns", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 2, "desired": 2, @@ -77,4 +89,4 @@ "Period": 0, "DisableTimeSeries": false } -] \ No newline at end of file +] diff --git a/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.5.0.plain.expected b/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.5.0.plain.expected index 8d8cfcb2927..dc5ae410bda 100644 ---
a/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.5.0.plain.expected +++ b/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.5.0.plain.expected @@ -7,6 +7,10 @@ "MetricSetFields": { "name": "kube-state-metrics", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -33,6 +37,10 @@ "MetricSetFields": { "name": "local-path-provisioner", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -59,6 +67,10 @@ "MetricSetFields": { "name": "coredns", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 2, "desired": 2, @@ -77,4 +89,4 @@ "Period": 0, "DisableTimeSeries": false } -] \ No newline at end of file +] diff --git a/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.6.0.plain.expected b/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.6.0.plain.expected index 8d8cfcb2927..dc5ae410bda 100644 --- a/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.6.0.plain.expected +++ b/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.6.0.plain.expected @@ -7,6 +7,10 @@ "MetricSetFields": { "name": "kube-state-metrics", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -33,6 +37,10 @@ "MetricSetFields": { "name": "local-path-provisioner", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -59,6 +67,10 @@ "MetricSetFields": { "name": "coredns", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 2, "desired": 2, @@ -77,4 +89,4 @@ "Period": 0, "DisableTimeSeries": false } -] \ No newline at end of file +] diff --git a/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.7.0.plain.expected b/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.7.0.plain.expected index 8d8cfcb2927..dc5ae410bda 100644 --- a/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.7.0.plain.expected +++ b/metricbeat/module/kubernetes/state_deployment/_meta/test/ksm.v2.7.0.plain.expected @@ -7,6 +7,10 @@ "MetricSetFields": { "name": "kube-state-metrics", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -33,6 +37,10 @@ "MetricSetFields": { "name": "local-path-provisioner", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -59,6 +67,10 @@ "MetricSetFields": { "name": "coredns", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 2, "desired": 2, @@ -77,4 +89,4 @@ "Period": 0, "DisableTimeSeries": false } -] \ No newline at end of file +] diff --git a/metricbeat/module/kubernetes/state_deployment/_meta/testdata/docs.plain-expected.json b/metricbeat/module/kubernetes/state_deployment/_meta/testdata/docs.plain-expected.json index 57c8514aa53..b9ea5131f04 100644 --- a/metricbeat/module/kubernetes/state_deployment/_meta/testdata/docs.plain-expected.json +++ b/metricbeat/module/kubernetes/state_deployment/_meta/testdata/docs.plain-expected.json @@ -9,6 +9,10 @@ "deployment": { "name": "kube-state-metrics", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, 
"replicas": { "available": 1, "desired": 1, @@ -37,6 +41,10 @@ "deployment": { "name": "local-path-provisioner", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -65,6 +73,10 @@ "deployment": { "name": "coredns", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 2, "desired": 2, diff --git a/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.4.2.plain-expected.json b/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.4.2.plain-expected.json index 57c8514aa53..b9ea5131f04 100644 --- a/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.4.2.plain-expected.json +++ b/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.4.2.plain-expected.json @@ -9,6 +9,10 @@ "deployment": { "name": "kube-state-metrics", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -37,6 +41,10 @@ "deployment": { "name": "local-path-provisioner", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -65,6 +73,10 @@ "deployment": { "name": "coredns", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 2, "desired": 2, diff --git a/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.5.0.plain-expected.json b/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.5.0.plain-expected.json index 57c8514aa53..b9ea5131f04 100644 --- a/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.5.0.plain-expected.json +++ b/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.5.0.plain-expected.json @@ -9,6 +9,10 @@ "deployment": { "name": "kube-state-metrics", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -37,6 +41,10 @@ "deployment": { "name": "local-path-provisioner", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -65,6 +73,10 @@ "deployment": { "name": "coredns", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 2, "desired": 2, diff --git a/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.6.0.plain-expected.json b/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.6.0.plain-expected.json index 57c8514aa53..b9ea5131f04 100644 --- a/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.6.0.plain-expected.json +++ b/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.6.0.plain-expected.json @@ -9,6 +9,10 @@ "deployment": { "name": "kube-state-metrics", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -37,6 +41,10 @@ "deployment": { "name": "local-path-provisioner", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -65,6 +73,10 @@ "deployment": { "name": "coredns", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 2, "desired": 2, diff --git a/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.7.0.plain-expected.json 
b/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.7.0.plain-expected.json index 57c8514aa53..b9ea5131f04 100644 --- a/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.7.0.plain-expected.json +++ b/metricbeat/module/kubernetes/state_deployment/_meta/testdata/ksm.v2.7.0.plain-expected.json @@ -9,6 +9,10 @@ "deployment": { "name": "kube-state-metrics", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -37,6 +41,10 @@ "deployment": { "name": "local-path-provisioner", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 1, "desired": 1, @@ -65,6 +73,10 @@ "deployment": { "name": "coredns", "paused": false, + "status": { + "available": "true", + "progressing": "true" + }, "replicas": { "available": 2, "desired": 2, diff --git a/metricbeat/module/kubernetes/state_deployment/state_deployment.go b/metricbeat/module/kubernetes/state_deployment/state_deployment.go index 1e7129719b4..31bb53f9e33 100644 --- a/metricbeat/module/kubernetes/state_deployment/state_deployment.go +++ b/metricbeat/module/kubernetes/state_deployment/state_deployment.go @@ -32,7 +32,23 @@ var mapping = &p.MetricsMapping{ "kube_deployment_status_replicas_unavailable": p.Metric("replicas.unavailable"), "kube_deployment_status_replicas_available": p.Metric("replicas.available"), "kube_deployment_spec_replicas": p.Metric("replicas.desired"), - "kube_deployment_spec_paused": p.BooleanMetric("paused"), + /* + This is how deployment_status_condition field will be exported: + + kube_deployment_status_condition{namespace="default",deployment="test-deployment",condition="Available",status="true"} 0 + kube_deployment_status_condition{namespace="default",deployment="test-deployment",condition="Available",status="false"} 1 + kube_deployment_status_condition{namespace="default",deployment="test-deployment",condition="Available",status="unknown"} 0 + kube_deployment_status_condition{namespace="default",deployment="test-deployment",condition="Progressing",status="true"} 1 + kube_deployment_status_condition{namespace="default",deployment="test-deployment",condition="Progressing",status="false"} 0 + kube_deployment_status_condition{namespace="default",deployment="test-deployment",condition="Progressing",status="unknown"} 0 + */ + "kube_deployment_status_condition": p.LabelMetric("status", "status", p.OpFilterMap( + "condition", map[string]string{ + "Progressing": "progressing", + "Available": "available", + }, + )), //The current status conditions of a deployment + "kube_deployment_spec_paused": p.BooleanMetric("paused"), }, Labels: map[string]p.LabelMap{ diff --git a/metricbeat/module/kvm/dommemstat/dommemstat.go b/metricbeat/module/kvm/dommemstat/dommemstat.go index 2714204d0af..f8585deb533 100644 --- a/metricbeat/module/kvm/dommemstat/dommemstat.go +++ b/metricbeat/module/kvm/dommemstat/dommemstat.go @@ -18,6 +18,8 @@ package dommemstat import ( + "errors" + "fmt" "net" "net/url" "time" @@ -25,8 +27,6 @@ import ( "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/elastic-agent-libs/mapstr" - "github.com/pkg/errors" - "github.com/digitalocean/go-libvirt" "github.com/digitalocean/go-libvirt/libvirttest" @@ -101,7 +101,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { c, err = net.DialTimeout(u.Scheme, address, m.Timeout) if err != nil { - return errors.Wrapf(err, "cannot connect to %v", u) + return fmt.Errorf("cannot connect to %v: %w", u, 
err) } } @@ -109,11 +109,11 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { l := libvirt.New(c) if err = l.Connect(); err != nil { - return errors.Wrap(err, "error connecting to libvirtd") + return fmt.Errorf("error connecting to libvirtd: %w", err) } defer func() { if err = l.Disconnect(); err != nil { - msg := errors.Wrap(err, "failed to disconnect") + msg := fmt.Errorf("failed to disconnect: %w", err) report.Error(msg) m.Logger().Error(msg) } @@ -121,20 +121,20 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { domains, err := l.Domains() if err != nil { - return errors.Wrap(err, "error listing domains") + return fmt.Errorf("error listing domains: %w", err) } for _, d := range domains { gotDomainMemoryStats, err := l.DomainMemoryStats(d, maximumStats, flags) if err != nil { - msg := errors.Wrapf(err, "error fetching memory stats for domain %s", d.Name) + msg := fmt.Errorf("error fetching memory stats for domain %s: %w", d.Name, err) report.Error(msg) m.Logger().Error(msg) continue } if len(gotDomainMemoryStats) == 0 { - msg := errors.Errorf("no memory stats for domain %s", d.Name) + msg := fmt.Errorf("no memory stats for domain %s", d.Name) report.Error(msg) m.Logger().Error(msg) continue diff --git a/metricbeat/module/kvm/status/status.go b/metricbeat/module/kvm/status/status.go index fdbb7a22b7a..320fe4a5df0 100644 --- a/metricbeat/module/kvm/status/status.go +++ b/metricbeat/module/kvm/status/status.go @@ -18,12 +18,11 @@ package status import ( + "fmt" "net" "net/url" "time" - "github.com/pkg/errors" - "github.com/digitalocean/go-libvirt" "github.com/digitalocean/go-libvirt/libvirttest" @@ -90,7 +89,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { c, err = net.DialTimeout(u.Scheme, address, m.Timeout) if err != nil { - return errors.Wrapf(err, "cannot connect to %v", u) + return fmt.Errorf("cannot connect to %v: %w", u, err) } } @@ -98,11 +97,11 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { l := libvirt.New(c) if err = l.Connect(); err != nil { - return errors.Wrap(err, "error connecting to libvirtd") + return fmt.Errorf("error connecting to libvirtd: %w", err) } defer func() { if err = l.Disconnect(); err != nil { - msg := errors.Wrap(err, "failed to disconnect") + msg := fmt.Errorf("failed to disconnect: %w", err) report.Error(msg) m.Logger().Error(msg) } @@ -110,7 +109,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { domains, err := l.Domains() if err != nil { - return errors.Wrap(err, "error listing domains") + return fmt.Errorf("error listing domains: %w", err) } for _, d := range domains { diff --git a/metricbeat/module/linux/conntrack/conntrack.go b/metricbeat/module/linux/conntrack/conntrack.go index 3970ff689c8..602a786b574 100644 --- a/metricbeat/module/linux/conntrack/conntrack.go +++ b/metricbeat/module/linux/conntrack/conntrack.go @@ -18,7 +18,8 @@ package conntrack import ( - "github.com/pkg/errors" + "fmt" + "github.com/prometheus/procfs" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" @@ -63,11 +64,11 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(report mb.ReporterV2) error { newFS, err := procfs.NewFS(m.mod.ResolveHostFS("/proc")) if err != nil { - return errors.Wrapf(err, "error creating new Host FS at %s", m.mod.ResolveHostFS("/proc")) + return fmt.Errorf("error creating new Host FS at %s: %w", m.mod.ResolveHostFS("/proc"), err) } conntrackStats, err := newFS.ConntrackStat() if err != nil { - return errors.Wrap(err, "error fetching conntrack stats") + return 
fmt.Errorf("error fetching conntrack stats: %w", err) } summedEvents := procfs.ConntrackStatEntry{} diff --git a/metricbeat/module/linux/iostat/iostat.go b/metricbeat/module/linux/iostat/iostat.go index f7143a37243..14604f8aea4 100644 --- a/metricbeat/module/linux/iostat/iostat.go +++ b/metricbeat/module/linux/iostat/iostat.go @@ -20,7 +20,7 @@ package iostat import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/mb" @@ -71,7 +71,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(report mb.ReporterV2) error { IOstats, err := diskio.IOCounters(m.includeDevices...) if err != nil { - return errors.Wrap(err, "disk io counters") + return fmt.Errorf("disk io counters: %w", err) } // Sample the current cpu counter @@ -89,7 +89,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { } result, err := m.stats.CalcIOStatistics(counters) if err != nil { - return errors.Wrap(err, "error calculating iostat") + return fmt.Errorf("error calculating iostat: %w", err) } IOstats := AddLinuxIOStat(result) event.DeepUpdate(IOstats) diff --git a/metricbeat/module/linux/ksm/data.go b/metricbeat/module/linux/ksm/data.go index 3ceb42c5231..94055224ca0 100644 --- a/metricbeat/module/linux/ksm/data.go +++ b/metricbeat/module/linux/ksm/data.go @@ -18,10 +18,9 @@ package ksm import ( + "fmt" "path/filepath" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/module/linux" ) @@ -40,37 +39,37 @@ func fetchKSMStats(ksmPath string) (ksmData, error) { // ReadIntFromFile returns pretty verbose error strings, so omit errors.Wrap here pshared, err := linux.ReadIntFromFile(filepath.Join(ksmPath, "pages_shared"), 10) if err != nil { - return ksmData{}, errors.Wrap(err, "error reading from pages_shared") + return ksmData{}, fmt.Errorf("error reading from pages_shared: %w", err) } pSharing, err := linux.ReadIntFromFile(filepath.Join(ksmPath, "pages_sharing"), 10) if err != nil { - return ksmData{}, errors.Wrap(err, "error reading from pages_sharing") + return ksmData{}, fmt.Errorf("error reading from pages_sharing: %w", err) } pUnshared, err := linux.ReadIntFromFile(filepath.Join(ksmPath, "pages_unshared"), 10) if err != nil { - return ksmData{}, errors.Wrap(err, "error reading from pages_unshared") + return ksmData{}, fmt.Errorf("error reading from pages_unshared: %w", err) } pVolatile, err := linux.ReadIntFromFile(filepath.Join(ksmPath, "pages_volatile"), 10) if err != nil { - return ksmData{}, errors.Wrap(err, "error reading from pages_volatile") + return ksmData{}, fmt.Errorf("error reading from pages_volatile: %w", err) } fScans, err := linux.ReadIntFromFile(filepath.Join(ksmPath, "full_scans"), 10) if err != nil { - return ksmData{}, errors.Wrap(err, "error reading from full_scans") + return ksmData{}, fmt.Errorf("error reading from full_scans: %w", err) } stableChains, err := linux.ReadIntFromFile(filepath.Join(ksmPath, "stable_node_chains"), 10) if err != nil { - return ksmData{}, errors.Wrap(err, "error reading from stable_node_chains") + return ksmData{}, fmt.Errorf("error reading from stable_node_chains: %w", err) } stableDups, err := linux.ReadIntFromFile(filepath.Join(ksmPath, "stable_node_dups"), 10) if err != nil { - return ksmData{}, errors.Wrap(err, "error reading from stable_node_dups ") + return ksmData{}, fmt.Errorf("error reading from stable_node_dups : %w", err) } return ksmData{PagesShared: pshared, PagesSharing: pSharing, PagesUnshared: pUnshared, diff --git 
a/metricbeat/module/linux/ksm/ksm.go b/metricbeat/module/linux/ksm/ksm.go index abbd899cd8a..1bf344be184 100644 --- a/metricbeat/module/linux/ksm/ksm.go +++ b/metricbeat/module/linux/ksm/ksm.go @@ -18,7 +18,7 @@ package ksm import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/mb" @@ -62,7 +62,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(report mb.ReporterV2) error { ksmData, err := fetchKSMStats(m.mod.ResolveHostFS("/sys/kernel/mm/ksm")) if err != nil { - return errors.Wrap(err, "error fetching KSM stats") + return fmt.Errorf("error fetching KSM stats: %w", err) } report.Event(mb.Event{ diff --git a/metricbeat/module/linux/memory/data.go b/metricbeat/module/linux/memory/data.go index de41ed5df00..dfc5f3276ae 100644 --- a/metricbeat/module/linux/memory/data.go +++ b/metricbeat/module/linux/memory/data.go @@ -23,8 +23,6 @@ import ( "strconv" "strings" - "github.com/pkg/errors" - "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/elastic-agent-libs/transform/typeconv" util "github.com/elastic/elastic-agent-system-metrics/metric" @@ -37,7 +35,7 @@ import ( func FetchLinuxMemStats(baseMap mapstr.M, hostfs resolve.Resolver) error { vmstat, err := GetVMStat(hostfs) if err != nil { - return errors.Wrap(err, "error fetching VMStats") + return fmt.Errorf("error fetching VMStats: %w", err) } pageStats := mapstr.M{} @@ -55,7 +53,7 @@ func FetchLinuxMemStats(baseMap mapstr.M, hostfs resolve.Resolver) error { thp, err := getHugePages(hostfs) if err != nil { - return errors.Wrap(err, "error getting huge pages") + return fmt.Errorf("error getting huge pages: %w", err) } baseMap["hugepages"] = thp @@ -71,12 +69,12 @@ func FetchLinuxMemStats(baseMap mapstr.M, hostfs resolve.Resolver) error { // This way very similar metrics aren't split across different modules, even though Linux reports them in different places. eventRaw, err := metrics.Get(hostfs) if err != nil { - return errors.Wrap(err, "error fetching memory metrics") + return fmt.Errorf("error fetching memory metrics: %w", err) } swap := mapstr.M{} err = typeconv.Convert(&swap, &eventRaw.Swap) if err != nil { - return errors.Wrap(err, "error converting raw event") + return fmt.Errorf("error converting raw event: %w", err) } baseMap["swap"] = swap @@ -122,7 +120,7 @@ func getHugePages(hostfs resolve.Resolver) (mapstr.M, error) { // see https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt table, err := memory.ParseMeminfo(hostfs) if err != nil { - return nil, errors.Wrap(err, "error parsing meminfo") + return nil, fmt.Errorf("error parsing meminfo: %w", err) } thp := mapstr.M{} @@ -168,7 +166,7 @@ func GetVMStat(hostfs resolve.Resolver) (map[string]uint64, error) { vmstatFile := hostfs.ResolveHostFS("proc/vmstat") content, err := os.ReadFile(vmstatFile) if err != nil { - return nil, errors.Wrapf(err, "error reading vmstat from %s", vmstatFile) + return nil, fmt.Errorf("error reading vmstat from %s: %w", vmstatFile, err) } // I'm not a fan of throwing stuff directly to maps, but this is a huge amount of kernel/config specific metrics, and we're the only consumer of this for now. 
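The hunks in this run all apply the same mechanical migration: errors.Wrap(err, "msg") from github.com/pkg/errors becomes the stdlib fmt.Errorf("msg: %w", err). The %w verb keeps the cause on the error chain, so sentinel checks keep working after the wrap. A minimal, self-contained sketch of the GetVMStat-style read-and-wrap shape (the path and helper name here are illustrative, not taken from this diff):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// readVMStat mirrors the read-and-wrap pattern used in GetVMStat above.
func readVMStat(path string) ([]byte, error) {
	content, err := os.ReadFile(path)
	if err != nil {
		// %w (unlike a plain %v) records err as the wrapped cause.
		return nil, fmt.Errorf("error reading vmstat from %s: %w", path, err)
	}
	return content, nil
}

func main() {
	_, err := readVMStat("/no/such/proc/vmstat")
	// The chain survives the wrap, so callers can still test the cause:
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true
}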
@@ -181,7 +179,7 @@ func GetVMStat(hostfs resolve.Resolver) (map[string]uint64, error) { num, err := strconv.ParseUint(string(parts[1]), 10, 64) if err != nil { - return nil, errors.Wrapf(err, "failed to parse value %s", parts[1]) + return nil, fmt.Errorf("failed to parse value %s: %w", parts[1], err) } vmstat[parts[0]] = num diff --git a/metricbeat/module/linux/memory/memory.go b/metricbeat/module/linux/memory/memory.go index e74b8b36e60..01eef01cb4b 100644 --- a/metricbeat/module/linux/memory/memory.go +++ b/metricbeat/module/linux/memory/memory.go @@ -18,7 +18,7 @@ package memory import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/mb" @@ -61,7 +61,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { rootEvent := mapstr.M{} err := FetchLinuxMemStats(rootEvent, m.mod) if err != nil { - return errors.Wrap(err, "error fetching memory stats") + return fmt.Errorf("error fetching memory stats: %w", err) } report.Event(mb.Event{ MetricSetFields: rootEvent, diff --git a/metricbeat/module/linux/pageinfo/pageinfo.go b/metricbeat/module/linux/pageinfo/pageinfo.go index d32243812f2..fc1be3fca35 100644 --- a/metricbeat/module/linux/pageinfo/pageinfo.go +++ b/metricbeat/module/linux/pageinfo/pageinfo.go @@ -19,10 +19,9 @@ package pageinfo import ( "bufio" + "fmt" "os" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/elastic-agent-libs/mapstr" @@ -67,7 +66,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { fd, err := os.Open(pagePath) if err != nil { - return errors.Wrap(err, "error opening file") + return fmt.Errorf("error opening file: %w", err) } defer fd.Close() @@ -75,7 +74,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { zones, err := readPageFile(reader) if err != nil { - return errors.Wrap(err, "error reading pagetypeinfo") + return fmt.Errorf("error reading pagetypeinfo: %w", err) } report.Event(mb.Event{ diff --git a/metricbeat/module/linux/pageinfo/reader.go b/metricbeat/module/linux/pageinfo/reader.go index 9af65dfb1f7..39a5062f6d0 100644 --- a/metricbeat/module/linux/pageinfo/reader.go +++ b/metricbeat/module/linux/pageinfo/reader.go @@ -19,11 +19,10 @@ package pageinfo import ( "bufio" + "fmt" "io" "regexp" "strconv" - - "github.com/pkg/errors" ) // zones represents raw pagetypeinfo data @@ -83,7 +82,7 @@ func readPageFile(reader *bufio.Reader) (pageInfo, error) { match := matches[0] nodeLevel, err = strconv.ParseInt(string(match[1]), 10, 64) if err != nil { - return pageInfo{}, errors.Wrapf(err, "error parsing node number: %s", string(match[1])) + return pageInfo{}, fmt.Errorf("error parsing node number: %s: %w", string(match[1]), err) } if nodes[nodeLevel].DMA == nil { nodes[nodeLevel] = zones{ @@ -100,7 +99,7 @@ func readPageFile(reader *bufio.Reader) (pageInfo, error) { for order, count := range match[4:] { zoneOrders[order], err = strconv.ParseInt(string(count), 10, 64) if err != nil { - return pageInfo{}, errors.Wrapf(err, "error parsing zone: %s", string(count)) + return pageInfo{}, fmt.Errorf("error parsing zone: %s: %w", string(count), err) } nodes[nodeLevel].OrderSummary[order] += zoneOrders[order] if zoneType == "DMA" { diff --git a/metricbeat/module/linux/pressure/pressure.go b/metricbeat/module/linux/pressure/pressure.go index 529fb69ed33..292dcd194a7 100644 --- a/metricbeat/module/linux/pressure/pressure.go +++ b/metricbeat/module/linux/pressure/pressure.go @@ -21,7 
+21,6 @@ import ( "fmt" "runtime" - "github.com/pkg/errors" "github.com/prometheus/procfs" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" @@ -75,7 +74,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(report mb.ReporterV2) error { events, err := fetchLinuxPSIStats(m) if err != nil { - return errors.Wrap(err, "error fetching PSI stats") + return fmt.Errorf("error fetching PSI stats: %w", err) } for _, event := range events { @@ -92,13 +91,13 @@ func fetchLinuxPSIStats(m *MetricSet) ([]mapstr.M, error) { procfs, err := procfs.NewFS(m.mod.ResolveHostFS("/proc")) if err != nil { - return nil, errors.Wrapf(err, "error creating new Host FS at %s", m.mod.ResolveHostFS("/proc")) + return nil, fmt.Errorf("error creating new Host FS at %s: %w", m.mod.ResolveHostFS("/proc"), err) } for _, resource := range resources { psiMetric, err := procfs.PSIStatsForResource(resource) if err != nil { - return nil, errors.Wrap(err, "check that /proc/pressure is available, and/or enabled") + return nil, fmt.Errorf("check that /proc/pressure is available, and/or enabled: %w", err) } event := mapstr.M{ diff --git a/metricbeat/module/linux/rapl/rapl.go b/metricbeat/module/linux/rapl/rapl.go index b0181f399bd..5dd823efc54 100644 --- a/metricbeat/module/linux/rapl/rapl.go +++ b/metricbeat/module/linux/rapl/rapl.go @@ -20,6 +20,7 @@ package rapl import ( + "errors" "fmt" "io/ioutil" "os" @@ -31,7 +32,6 @@ import ( "time" "github.com/fearful-symmetry/gorapl/rapl" - "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" @@ -86,7 +86,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { sys := base.Module().(resolve.Resolver) CPUList, err := getMSRCPUs(sys) if err != nil { - return nil, errors.Wrap(err, "error getting list of CPUs to query") + return nil, fmt.Errorf("error getting list of CPUs to query: %w", err) } // check to see if msr-safe is installed @@ -97,12 +97,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, errors.New("no msr_safe device found. 
Is the kernel module loaded?") } if err != nil { - return nil, errors.Wrapf(err, "could not check msr_safe device at %s", queryPath) + return nil, fmt.Errorf("could not check msr_safe device at %s: %w", queryPath, err) } } else { user, err := user.Current() if err != nil { - return nil, errors.Wrap(err, "error fetching user list") + return nil, fmt.Errorf("error fetching user list: %w", err) } if user.Uid != "0" { return nil, errors.New("linux/rapl must run as root if not using msr-safe") @@ -119,7 +119,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { } handler, err := rapl.CreateNewHandler(cpu, formatPath) if err != nil { - return nil, errors.Wrapf(err, "error creating handler at path %s for CPU %d", formatPath, cpu) + return nil, fmt.Errorf("error creating handler at path %s for CPU %d: %w", formatPath, cpu, err) } handlers[cpu] = handler @@ -207,7 +207,7 @@ func (m *MetricSet) updatePower() map[int]map[rapl.RAPLDomain]energyUsage { func getMSRCPUs(hostfs resolve.Resolver) ([]int, error) { CPUs, err := topoPkgCPUMap(hostfs) if err != nil { - return nil, errors.Wrap(err, "error fetching CPU topology") + return nil, fmt.Errorf("error fetching CPU topology: %w", err) } coreList := []int{} for _, cores := range CPUs { @@ -244,16 +244,16 @@ func topoPkgCPUMap(hostfs resolve.Resolver) (map[int][]int, error) { fullPkg := hostfs.ResolveHostFS(filepath.Join(sysdir, file.Name(), "/topology/physical_package_id")) dat, err := ioutil.ReadFile(fullPkg) if err != nil { - return nil, errors.Wrapf(err, "error reading file %s", fullPkg) + return nil, fmt.Errorf("error reading file %s: %w", fullPkg, err) } phys, err := strconv.ParseInt(strings.TrimSpace(string(dat)), 10, 64) if err != nil { - return nil, errors.Wrapf(err, "error parsing value from %s", fullPkg) + return nil, fmt.Errorf("error parsing value from %s: %w", fullPkg, err) } var cpuCore int _, err = fmt.Sscanf(file.Name(), "cpu%d", &cpuCore) if err != nil { - return nil, errors.Wrapf(err, "error fetching CPU core value from string %s", file.Name()) + return nil, fmt.Errorf("error fetching CPU core value from string %s: %w", file.Name(), err) } pkgList, ok := cpuMap[int(phys)] if !ok { diff --git a/metricbeat/module/linux/util.go b/metricbeat/module/linux/util.go index 87b9bcd0512..def4603c816 100644 --- a/metricbeat/module/linux/util.go +++ b/metricbeat/module/linux/util.go @@ -18,11 +18,10 @@ package linux import ( + "fmt" "io/ioutil" "strconv" "strings" - - "github.com/pkg/errors" ) // ReadIntFromFile reads a single int value from a path and returns an int64. 
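Because every layer now wraps with %w, the chains compose: fetchKSMStats wraps whatever ReadIntFromFile returns, and callers can still reach the root cause with errors.As. A minimal sketch with hypothetical stand-ins for that two-layer call chain (the function names below are illustrative, not the module's actual code):

package main

import (
	"errors"
	"fmt"
	"strconv"
)

// parseValue plays the role of ReadIntFromFile: parse and wrap.
func parseValue(s string) (int64, error) {
	v, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("error converting string: %s: %w", s, err)
	}
	return v, nil
}

// fetchStat plays the role of fetchKSMStats: wrap again one level up.
func fetchStat(s string) (int64, error) {
	v, err := parseValue(s)
	if err != nil {
		return 0, fmt.Errorf("error reading from pages_shared: %w", err)
	}
	return v, nil
}

func main() {
	_, err := fetchStat("not-a-number")
	// errors.As walks through both wraps to the original *strconv.NumError.
	var numErr *strconv.NumError
	fmt.Println(errors.As(err, &numErr)) // true
	fmt.Println(numErr.Func)             // ParseInt
}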
@@ -31,14 +30,14 @@ func ReadIntFromFile(path string, base int) (int64, error) { raw, err := ioutil.ReadFile(path) if err != nil { - return 0, errors.Wrapf(err, "error reading file %s", path) + return 0, fmt.Errorf("error reading file %s: %w", path, err) } clean := strings.TrimSpace(string(raw)) intval, err := strconv.ParseInt(clean, 10, 64) if err != nil { - return 0, errors.Wrapf(err, "error converting string: %s", clean) + return 0, fmt.Errorf("error converting string: %s: %w", clean, err) } return intval, nil diff --git a/metricbeat/module/logstash/logstash.go b/metricbeat/module/logstash/logstash.go index 8623bcd9a80..6e796633612 100644 --- a/metricbeat/module/logstash/logstash.go +++ b/metricbeat/module/logstash/logstash.go @@ -22,8 +22,6 @@ import ( "fmt" "net/url" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/helper/elastic" "github.com/elastic/beats/v7/metricbeat/mb" @@ -109,7 +107,7 @@ func NewMetricSet(base mb.BaseMetricSet) (*MetricSet, error) { func GetPipelines(m *MetricSet) ([]PipelineState, string, error) { content, err := fetchPath(m.HTTP, "_node/pipelines", "graph=true") if err != nil { - return nil, "", errors.Wrap(err, "could not fetch node pipelines") + return nil, "", fmt.Errorf("could not fetch node pipelines: %w", err) } pipelinesResponse := struct { @@ -121,7 +119,7 @@ func GetPipelines(m *MetricSet) ([]PipelineState, string, error) { err = json.Unmarshal(content, &pipelinesResponse) if err != nil { - return nil, "", errors.Wrap(err, "could not parse node pipelines response") + return nil, "", fmt.Errorf("could not parse node pipelines response: %w", err) } var pipelines []PipelineState diff --git a/metricbeat/module/logstash/node/data.go b/metricbeat/module/logstash/node/data.go index 0ccc42ef7df..49c17b73275 100644 --- a/metricbeat/module/logstash/node/data.go +++ b/metricbeat/module/logstash/node/data.go @@ -19,8 +19,7 @@ package node import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/libbeat/common" s "github.com/elastic/beats/v7/libbeat/common/schema" @@ -86,7 +85,7 @@ func eventMapping(r mb.ReporterV2, content []byte, pipelines []logstash.Pipeline var data map[string]interface{} err := json.Unmarshal(content, &data) if err != nil { - return errors.Wrap(err, "failure parsing Logstash Node API response") + return fmt.Errorf("failure parsing Logstash Node API response: %w", err) } pipelines = getUserDefinedPipelines(pipelines) @@ -96,7 +95,7 @@ func eventMapping(r mb.ReporterV2, content []byte, pipelines []logstash.Pipeline for _, pipeline := range pipelines { fields, err := schema.Apply(data) if err != nil { - return errors.Wrap(err, "failure applying node schema") + return fmt.Errorf("failure applying node schema: %w", err) } removeClusterUUIDsFromPipeline(pipeline) diff --git a/metricbeat/module/memcached/stats/stats.go b/metricbeat/module/memcached/stats/stats.go index 6e9ebb66d3f..890a8c701f7 100644 --- a/metricbeat/module/memcached/stats/stats.go +++ b/metricbeat/module/memcached/stats/stats.go @@ -19,12 +19,11 @@ package stats import ( "bufio" + "fmt" "net" "net/url" "strings" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" ) @@ -54,18 +53,18 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { network, address, err := getNetworkAndAddress(m.HostData()) if err != nil { - return errors.Wrap(err, "error in fetch") 
+ return fmt.Errorf("error in fetch: %w", err) } conn, err := net.DialTimeout(network, address, m.Module().Config().Timeout) if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } defer conn.Close() _, err = conn.Write([]byte("stats\n")) if err != nil { - return errors.Wrap(err, "error in connection") + return fmt.Errorf("error in connection: %w", err) } scanner := bufio.NewScanner(conn) @@ -95,7 +94,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { func getNetworkAndAddress(hostData mb.HostData) (network string, address string, err error) { u, err := url.Parse(hostData.URI) if err != nil { - err = errors.Wrap(err, "invalid URL") + err = fmt.Errorf("invalid URL: %w", err) return } diff --git a/metricbeat/module/munin/munin.go b/metricbeat/module/munin/munin.go index fe2b981d6ba..4438f510d4c 100644 --- a/metricbeat/module/munin/munin.go +++ b/metricbeat/module/munin/munin.go @@ -19,6 +19,8 @@ package munin import ( "bufio" + "errors" + "fmt" "io" "net" "regexp" @@ -26,8 +28,6 @@ import ( "strings" "time" - "github.com/pkg/errors" - "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -88,7 +88,7 @@ func (n *Node) List() ([]string, error) { func (n *Node) Fetch(plugin string, sanitize bool) (mapstr.M, error) { _, err := io.WriteString(n.writer, "fetch "+plugin+"\n") if err != nil { - return nil, errors.Wrapf(err, "failed to fetch metrics for plugin '%s'", plugin) + return nil, fmt.Errorf("failed to fetch metrics for plugin '%s': %w", plugin, err) } event := mapstr.M{} diff --git a/metricbeat/module/munin/node/node.go b/metricbeat/module/munin/node/node.go index a0668a98681..13c3362e863 100644 --- a/metricbeat/module/munin/node/node.go +++ b/metricbeat/module/munin/node/node.go @@ -18,10 +18,10 @@ package node import ( + "errors" + "fmt" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/munin" "github.com/elastic/elastic-agent-libs/mapstr" @@ -70,7 +70,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { node, err := munin.Connect(m.Host(), m.timeout) if err != nil { - return errors.Wrap(err, "error in Connect") + return fmt.Errorf("error in Connect: %w", err) } defer node.Close() @@ -78,14 +78,14 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { if len(plugins) == 0 { plugins, err = node.List() if err != nil { - return errors.Wrap(err, "error getting plugin list") + return fmt.Errorf("error getting plugin list: %w", err) } } for _, plugin := range plugins { metrics, err := node.Fetch(plugin, m.sanitize) if err != nil { - msg := errors.Wrap(err, "error fetching metrics") + msg := fmt.Errorf("error fetching metrics: %w", err) r.Error(err) m.Logger().Error(msg) continue diff --git a/metricbeat/module/mysql/galera_status/status.go b/metricbeat/module/mysql/galera_status/status.go index 54b4d8a90d8..d1dc68cd0a2 100644 --- a/metricbeat/module/mysql/galera_status/status.go +++ b/metricbeat/module/mysql/galera_status/status.go @@ -26,12 +26,11 @@ package galera_status import ( "database/sql" + "fmt" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/mysql" - - "github.com/pkg/errors" ) // init registers the MetricSet with the central registry. 
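One behavioral note on the Close hunks just below (and the identical ones in mysql/query and mysql/status): the two wrapping styles differ when the inner error is nil. pkg/errors' Wrap(nil, ...) returns nil, whereas fmt.Errorf always returns a non-nil error, even when its %w operand is nil, so after this change Close reports an error even when db.Close() succeeds. A small demonstration of the stdlib behavior:

package main

import (
	"errors"
	"fmt"
)

func main() {
	var closeErr error // stands in for a db.Close() that succeeded

	err := fmt.Errorf("failed to close mysql database client: %w", closeErr)
	fmt.Println(err == nil)         // false: the message is built regardless
	fmt.Println(errors.Unwrap(err)) // <nil>: nothing was actually wrapped
	fmt.Println(err)                // "failed to close mysql database client: %!w(<nil>)"
}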
@@ -61,7 +60,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { var err error m.db, err = mysql.NewDB(m.HostData().URI) if err != nil { - return errors.Wrap(err, "Galera-status fetch failed") + return fmt.Errorf("Galera-status fetch failed: %w", err) } } @@ -114,5 +113,5 @@ func (m *MetricSet) Close() error { if m.db == nil { return nil } - return errors.Wrap(m.db.Close(), "failed to close mysql database client") + return fmt.Errorf("failed to close mysql database client: %w", m.db.Close()) } diff --git a/metricbeat/module/mysql/mysql.go b/metricbeat/module/mysql/mysql.go index dc7793f72e3..35388a9a1bd 100644 --- a/metricbeat/module/mysql/mysql.go +++ b/metricbeat/module/mysql/mysql.go @@ -22,11 +22,11 @@ package mysql import ( "database/sql" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/go-sql-driver/mysql" - "github.com/pkg/errors" ) func init() { @@ -64,7 +64,7 @@ func ParseDSN(mod mb.Module, host string) (mb.HostData, error) { config, err := mysql.ParseDSN(host) if err != nil { - return mb.HostData{}, errors.Wrapf(err, "error parsing mysql host") + return mb.HostData{}, fmt.Errorf("error parsing mysql host: %w", err) } if config.User == "" { @@ -102,7 +102,7 @@ func ParseDSN(mod mb.Module, host string) (mb.HostData, error) { func NewDB(dsn string) (*sql.DB, error) { db, err := sql.Open("mysql", dsn) if err != nil { - return nil, errors.Wrap(err, "sql open failed") + return nil, fmt.Errorf("sql open failed: %w", err) } return db, nil } diff --git a/metricbeat/module/mysql/query/query.go b/metricbeat/module/mysql/query/query.go index 1af5d3644cd..35881d76401 100644 --- a/metricbeat/module/mysql/query/query.go +++ b/metricbeat/module/mysql/query/query.go @@ -25,8 +25,7 @@ package query import ( "context" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/helper/sql" @@ -82,7 +81,7 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error { var err error m.db, err = sql.NewDBClient("mysql", m.HostData().URI, m.Logger()) if err != nil { - return errors.Wrap(err, "mysql-status fetch failed") + return fmt.Errorf("mysql-status fetch failed: %w", err) } } @@ -142,5 +141,5 @@ func (m *MetricSet) Close() error { if m.db == nil { return nil } - return errors.Wrap(m.db.Close(), "failed to close mysql database client") + return fmt.Errorf("failed to close mysql database client: %w", m.db.Close()) } diff --git a/metricbeat/module/mysql/status/status.go b/metricbeat/module/mysql/status/status.go index b2da6ec3551..dd57f7e23c9 100644 --- a/metricbeat/module/mysql/status/status.go +++ b/metricbeat/module/mysql/status/status.go @@ -25,11 +25,10 @@ package status import ( "database/sql" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/mysql" - - "github.com/pkg/errors" ) func init() { @@ -56,7 +55,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { var err error m.db, err = mysql.NewDB(m.HostData().URI) if err != nil { - return errors.Wrap(err, "mysql-status fetch failed") + return fmt.Errorf("mysql-status fetch failed: %w", err) } } @@ -109,5 +108,5 @@ func (m *MetricSet) Close() error { if m.db == nil { return nil } - return errors.Wrap(m.db.Close(), "failed to close mysql database client") + return fmt.Errorf("failed to close mysql database client: %w", m.db.Close()) } diff --git a/metricbeat/module/nats/_meta/Dockerfile b/metricbeat/module/nats/_meta/Dockerfile index 24757de5400..85751dda114 100644 --- 
a/metricbeat/module/nats/_meta/Dockerfile +++ b/metricbeat/module/nats/_meta/Dockerfile @@ -2,16 +2,16 @@ ARG NATS_VERSION=2.0.4 FROM nats:$NATS_VERSION # build stage -FROM golang:1.13-alpine3.11 AS build-env -RUN apk --no-cache add build-base git mercurial gcc -RUN cd src && go get -d github.com/nats-io/nats.go/ -RUN cd src/github.com/nats-io/nats.go/examples/nats-bench && git checkout tags/v1.10.0 && go build . +FROM golang:1.20.7 AS build-env +RUN apt-get install git mercurial gcc +RUN git clone https://github.com/nats-io/nats.go.git /nats-go +RUN cd /nats-go/examples/nats-bench && git checkout tags/v1.10.0 && go build . # create an enhanced container with nc command available since nats is based # on scratch image making healthcheck impossible FROM alpine:latest COPY --from=0 / /opt/nats -COPY --from=build-env /go/src/github.com/nats-io/nats.go/examples/nats-bench/nats-bench /nats-bench +COPY --from=build-env /nats-go/examples/nats-bench/nats-bench /nats-bench COPY run.sh /run.sh # Expose client, management, and cluster ports EXPOSE 4222 8222 6222 diff --git a/metricbeat/module/nats/connection/connection.go b/metricbeat/module/nats/connection/connection.go index b491d35ef30..0fc8cff2b60 100644 --- a/metricbeat/module/nats/connection/connection.go +++ b/metricbeat/module/nats/connection/connection.go @@ -18,7 +18,7 @@ package connection import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -84,11 +84,11 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } err = eventsMapping(r, content) if err != nil { - return errors.Wrap(err, "error in mapping") + return fmt.Errorf("error in mapping: %w", err) } return nil } diff --git a/metricbeat/module/nats/connection/data.go b/metricbeat/module/nats/connection/data.go index cc2b26dfbb5..c385ddcdc1d 100644 --- a/metricbeat/module/nats/connection/data.go +++ b/metricbeat/module/nats/connection/data.go @@ -19,10 +19,9 @@ package connection import ( "encoding/json" + "fmt" "time" - "github.com/pkg/errors" - s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" "github.com/elastic/beats/v7/metricbeat/mb" @@ -63,26 +62,26 @@ type Connections struct { func eventMapping(content map[string]interface{}, fieldsSchema s.Schema) (mb.Event, error) { fields, err := fieldsSchema.Apply(content) if err != nil { - return mb.Event{}, errors.Wrap(err, "error applying connection schema") + return mb.Event{}, fmt.Errorf("error applying connection schema: %w", err) } err = util.UpdateDuration(fields, "uptime") if err != nil { - return mb.Event{}, errors.Wrap(err, "failure updating uptime key") + return mb.Event{}, fmt.Errorf("failure updating uptime key: %w", err) } err = util.UpdateDuration(fields, "idle_time") if err != nil { - return mb.Event{}, errors.Wrap(err, "failure updating idle_time key") + return mb.Event{}, fmt.Errorf("failure updating idle_time key: %w", err) } moduleFields, err := moduleSchema.Apply(content) if err != nil { - return mb.Event{}, errors.Wrap(err, "error applying module schema") + return mb.Event{}, fmt.Errorf("error applying module schema: %w", err) } if err != nil { - return mb.Event{}, errors.Wrap(err, "failure parsing server timestamp") + return mb.Event{}, fmt.Errorf("failure parsing server 
timestamp: %w", err) } event := mb.Event{ MetricSetFields: fields, @@ -96,7 +95,7 @@ func eventsMapping(r mb.ReporterV2, content []byte) error { var err error connections := Connections{} if err = json.Unmarshal(content, &connections); err != nil { - return errors.Wrap(err, "failure parsing NATS connections API response") + return fmt.Errorf("failure parsing NATS connections API response: %w", err) } for _, con := range connections.Connections { @@ -104,7 +103,7 @@ func eventsMapping(r mb.ReporterV2, content []byte) error { con["server_id"] = connections.ServerID evt, err = eventMapping(con, connectionsSchema) if err != nil { - r.Error(errors.Wrap(err, "error mapping connection event")) + r.Error(fmt.Errorf("error mapping connection event: %w", err)) continue } evt.Timestamp = connections.Now diff --git a/metricbeat/module/nats/connections/connections.go b/metricbeat/module/nats/connections/connections.go index 8fcfb652e9f..3fb41cc5e2f 100644 --- a/metricbeat/module/nats/connections/connections.go +++ b/metricbeat/module/nats/connections/connections.go @@ -18,7 +18,7 @@ package connections import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -85,11 +85,11 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } err = eventMapping(r, content) if err != nil { - return errors.Wrap(err, "error in mapping") + return fmt.Errorf("error in mapping: %w", err) } return nil } diff --git a/metricbeat/module/nats/connections/data.go b/metricbeat/module/nats/connections/data.go index 241f46f77d2..200bb32c8ad 100644 --- a/metricbeat/module/nats/connections/data.go +++ b/metricbeat/module/nats/connections/data.go @@ -19,8 +19,7 @@ package connections import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -45,21 +44,21 @@ func eventMapping(r mb.ReporterV2, content []byte) error { err := json.Unmarshal(content, &inInterface) if err != nil { - return errors.Wrap(err, "failure parsing NATS connections API response") + return fmt.Errorf("failure parsing NATS connections API response: %w", err) } metricSetFields, err := connectionsSchema.Apply(inInterface) if err != nil { - return errors.Wrap(err, "failure applying connections schema") + return fmt.Errorf("failure applying connections schema: %w", err) } moduleFields, err := moduleSchema.Apply(inInterface) if err != nil { - return errors.Wrap(err, "failure applying module schema") + return fmt.Errorf("failure applying module schema: %w", err) } timestamp, err := util.GetNatsTimestamp(moduleFields) if err != nil { - return errors.Wrap(err, "failure parsing server timestamp") + return fmt.Errorf("failure parsing server timestamp: %w", err) } event := mb.Event{ MetricSetFields: metricSetFields, diff --git a/metricbeat/module/nats/route/data.go b/metricbeat/module/nats/route/data.go index 932933f9d9d..853c7a9cb83 100644 --- a/metricbeat/module/nats/route/data.go +++ b/metricbeat/module/nats/route/data.go @@ -19,10 +19,9 @@ package route import ( "encoding/json" + "fmt" "time" - "github.com/pkg/errors" - s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" 
"github.com/elastic/beats/v7/metricbeat/mb" @@ -62,16 +61,16 @@ type Routes struct { func eventMapping(content map[string]interface{}, fieldsSchema s.Schema) (mb.Event, error) { fields, err := fieldsSchema.Apply(content) if err != nil { - return mb.Event{}, errors.Wrap(err, "error applying routes schema") + return mb.Event{}, fmt.Errorf("error applying routes schema: %w", err) } moduleFields, err := moduleSchema.Apply(content) if err != nil { - return mb.Event{}, errors.Wrap(err, "error applying module schema") + return mb.Event{}, fmt.Errorf("error applying module schema: %w", err) } if err != nil { - return mb.Event{}, errors.Wrap(err, "failure parsing server timestamp") + return mb.Event{}, fmt.Errorf("failure parsing server timestamp: %w", err) } event := mb.Event{ MetricSetFields: fields, @@ -85,7 +84,7 @@ func eventsMapping(r mb.ReporterV2, content []byte) error { var err error connections := Routes{} if err = json.Unmarshal(content, &connections); err != nil { - return errors.Wrap(err, "failure parsing NATS connections API response") + return fmt.Errorf("failure parsing NATS connections API response: %w", err) } for _, con := range connections.Routes { @@ -93,7 +92,7 @@ func eventsMapping(r mb.ReporterV2, content []byte) error { con["server_id"] = connections.ServerID evt, err = eventMapping(con, routesSchema) if err != nil { - r.Error(errors.Wrap(err, "error mapping connection event")) + r.Error(fmt.Errorf("error mapping connection event: %w", err)) continue } evt.Timestamp = connections.Now diff --git a/metricbeat/module/nats/route/route.go b/metricbeat/module/nats/route/route.go index bf5814157f2..a2c1cb20f3d 100644 --- a/metricbeat/module/nats/route/route.go +++ b/metricbeat/module/nats/route/route.go @@ -18,7 +18,7 @@ package route import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -84,11 +84,11 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } err = eventsMapping(r, content) if err != nil { - return errors.Wrap(err, "error in mapping") + return fmt.Errorf("error in mapping: %w", err) } return nil diff --git a/metricbeat/module/nats/routes/data.go b/metricbeat/module/nats/routes/data.go index 02eb686e709..78c7cb10c30 100644 --- a/metricbeat/module/nats/routes/data.go +++ b/metricbeat/module/nats/routes/data.go @@ -19,8 +19,7 @@ package routes import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -45,20 +44,20 @@ func eventMapping(r mb.ReporterV2, content []byte) error { err := json.Unmarshal(content, &inInterface) if err != nil { - return errors.Wrap(err, "failure parsing Nats routes API response") + return fmt.Errorf("failure parsing Nats routes API response: %w", err) } metricSetFields, err := routesSchema.Apply(inInterface) if err != nil { - return errors.Wrap(err, "failure applying routes schema") + return fmt.Errorf("failure applying routes schema: %w", err) } moduleFields, err := moduleSchema.Apply(inInterface) if err != nil { - return errors.Wrap(err, "failure applying module schema") + return fmt.Errorf("failure applying module schema: %w", err) } timestamp, err := util.GetNatsTimestamp(moduleFields) if err != nil { - errors.Wrap(err, "failure parsing 
server timestamp") + return fmt.Errorf("failure parsing server timestamp: %w", err) } event := mb.Event{ MetricSetFields: metricSetFields, diff --git a/metricbeat/module/nats/routes/routes.go b/metricbeat/module/nats/routes/routes.go index 88eeebab41a..b3db4c4f7f7 100644 --- a/metricbeat/module/nats/routes/routes.go +++ b/metricbeat/module/nats/routes/routes.go @@ -18,7 +18,7 @@ package routes import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -85,11 +85,11 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } err = eventMapping(r, content) if err != nil { - return errors.Wrap(err, "error in mapping") + return fmt.Errorf("error in mapping: %w", err) } return nil diff --git a/metricbeat/module/nats/stats/data.go b/metricbeat/module/nats/stats/data.go index 7bc72177a54..c7f2ca719ef 100644 --- a/metricbeat/module/nats/stats/data.go +++ b/metricbeat/module/nats/stats/data.go @@ -19,8 +19,7 @@ package stats import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -71,30 +70,29 @@ func eventMapping(r mb.ReporterV2, content []byte) error { err := json.Unmarshal(content, &inInterface) if err != nil { - return errors.Wrap(err, "failure parsing Nats stats API response") + return fmt.Errorf("failure parsing Nats stats API response: %w", err) } metricsetMetrics, err = statsSchema.Apply(inInterface) if err != nil { - return errors.Wrap(err, "failure applying stats schema") + return fmt.Errorf("failure applying stats schema: %w", err) } err = util.UpdateDuration(metricsetMetrics, "uptime") if err != nil { - return errors.Wrap(err, "failure updating uptime key") + return fmt.Errorf("failure updating uptime key: %w", err) } d, err := metricsetMetrics.GetValue("http_req_stats") if err != nil { - return errors.Wrap(err, "failure retrieving http_req_stats key") + return fmt.Errorf("failure retrieving http_req_stats key: %w", err) } httpStats, ok := d.(mapstr.M) if !ok { - return errors.Wrap(err, "failure casting http_req_stats to common.Mapstr") - + return fmt.Errorf("failure casting http_req_stats to common.Mapstr") } err = metricsetMetrics.Delete("http_req_stats") if err != nil { - return errors.Wrap(err, "failure deleting http_req_stats key") + return fmt.Errorf("failure deleting http_req_stats key: %w", err) } metricsetMetrics["http"] = mapstr.M{ @@ -110,23 +108,23 @@ func eventMapping(r mb.ReporterV2, content []byte) error { } cpu, err := metricsetMetrics.GetValue("cpu") if err != nil { - return errors.Wrap(err, "failure retrieving cpu key") + return fmt.Errorf("failure retrieving cpu key: %w", err) } cpuUtil, ok := cpu.(float64) if !ok { - return errors.Wrap(err, "failure casting cpu to float64") + return fmt.Errorf("failure casting cpu to float64") } _, err = metricsetMetrics.Put("cpu", cpuUtil/100.0) if err != nil { - return errors.Wrap(err, "failure updating cpu key") + return fmt.Errorf("failure updating cpu key: %w", err) } moduleMetrics, err := moduleSchema.Apply(inInterface) if err != nil { - return errors.Wrap(err, "failure applying module schema") + return fmt.Errorf("failure applying module schema: %w", err) } timestamp, err := util.GetNatsTimestamp(moduleMetrics) if err != nil { - 
return errors.Wrap(err, "failure parsing server timestamp") + return fmt.Errorf("failure parsing server timestamp: %w", err) } evt := mb.Event{ MetricSetFields: metricsetMetrics, diff --git a/metricbeat/module/nats/stats/stats.go b/metricbeat/module/nats/stats/stats.go index fe1cfbdd975..5d5d69ff584 100644 --- a/metricbeat/module/nats/stats/stats.go +++ b/metricbeat/module/nats/stats/stats.go @@ -18,7 +18,7 @@ package stats import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -85,11 +85,11 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } err = eventMapping(r, content) if err != nil { - return errors.Wrap(err, "error in mapping") + return fmt.Errorf("error in mapping: %w", err) } return nil diff --git a/metricbeat/module/nats/subscriptions/data.go b/metricbeat/module/nats/subscriptions/data.go index a92bcae4b14..e06a39e913b 100644 --- a/metricbeat/module/nats/subscriptions/data.go +++ b/metricbeat/module/nats/subscriptions/data.go @@ -19,8 +19,7 @@ package subscriptions import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -50,12 +49,12 @@ func eventMapping(r mb.ReporterV2, content []byte) error { err := json.Unmarshal(content, &inInterface) if err != nil { - return errors.Wrap(err, "failure parsing Nats subscriptions API response") + return fmt.Errorf("failure parsing Nats subscriptions API response: %w", err) } event.MetricSetFields, err = subscriptionsSchema.Apply(inInterface) if err != nil { - return errors.Wrap(err, "failure applying subscriptions schema") + return fmt.Errorf("failure applying subscriptions schema: %w", err) } r.Event(event) diff --git a/metricbeat/module/nats/subscriptions/subscriptions.go b/metricbeat/module/nats/subscriptions/subscriptions.go index e9183f214c7..903606a3721 100644 --- a/metricbeat/module/nats/subscriptions/subscriptions.go +++ b/metricbeat/module/nats/subscriptions/subscriptions.go @@ -18,7 +18,7 @@ package subscriptions import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -85,11 +85,11 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } err = eventMapping(r, content) if err != nil { - return errors.Wrap(err, "error in mapping") + return fmt.Errorf("error in mapping: %w", err) } return nil diff --git a/metricbeat/module/nats/util/util.go b/metricbeat/module/nats/util/util.go index 1decbf0bb9f..ada2e12ef01 100644 --- a/metricbeat/module/nats/util/util.go +++ b/metricbeat/module/nats/util/util.go @@ -23,8 +23,6 @@ import ( "strings" "time" - "github.com/pkg/errors" - "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -39,7 +37,7 @@ func convertUptimeToSeconds(uptime string) (seconds int64, err error) { uptime = split[1] years, err = strconv.ParseInt(split[0], 10, 64) if err != nil { - err = errors.Wrap(err, "invalid years format in json data") + err = fmt.Errorf("invalid years format in json data: %w", err) return } seconds += years * 31536000 @@ -50,7 
+48,7 @@ func convertUptimeToSeconds(uptime string) (seconds int64, err error) { uptime = split[1] days, err = strconv.ParseInt(split[0], 10, 64) if err != nil { - err = errors.Wrap(err, "invalid days format in json data") + err = fmt.Errorf("invalid days format in json data: %w", err) return } seconds += days * 86400 @@ -61,7 +59,7 @@ func convertUptimeToSeconds(uptime string) (seconds int64, err error) { uptime = split[1] hours, err = strconv.ParseInt(split[0], 10, 64) if err != nil { - err = errors.Wrap(err, "invalid hours format in json data") + err = fmt.Errorf("invalid hours format in json data: %w", err) return } seconds += hours * 3600 @@ -72,7 +70,7 @@ func convertUptimeToSeconds(uptime string) (seconds int64, err error) { uptime = split[1] minutes, err = strconv.ParseInt(split[0], 10, 64) if err != nil { - err = errors.Wrap(err, "invalid minutes format in json data") + err = fmt.Errorf("invalid minutes format in json data: %w", err) return } seconds += minutes * 60 @@ -83,7 +81,7 @@ func convertUptimeToSeconds(uptime string) (seconds int64, err error) { uptime = split[1] secs, err = strconv.ParseInt(split[0], 10, 64) if err != nil { - err = errors.Wrap(err, "invalid seconds format in json data") + err = fmt.Errorf("invalid seconds format in json data: %w", err) return } seconds += secs @@ -99,11 +97,11 @@ func UpdateDuration(event mapstr.M, key string) error { } itemConverted, err := convertUptimeToSeconds(item.(string)) if err != nil { - return errors.Wrap(err, fmt.Sprintf("failure converting %v key from string to integer", key)) + return fmt.Errorf("failure converting %v key from string to integer: %w", key, err) } _, err = event.Put(key, itemConverted) if err != nil { - return errors.Wrap(err, fmt.Sprintf("failure updating %v key", key)) + return fmt.Errorf("failure updating %v key: %w", key, err) } return nil } diff --git a/metricbeat/module/nginx/stubstatus/stubstatus.go b/metricbeat/module/nginx/stubstatus/stubstatus.go index 7df0acf5336..5c28a82f464 100644 --- a/metricbeat/module/nginx/stubstatus/stubstatus.go +++ b/metricbeat/module/nginx/stubstatus/stubstatus.go @@ -19,7 +19,7 @@ package stubstatus import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -78,7 +78,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { scanner, err := m.http.FetchScanner() if err != nil { - return errors.Wrap(err, "error fetching status") + return fmt.Errorf("error fetching status: %w", err) } event, _ := eventMapping(scanner, m) reporter.Event(mb.Event{MetricSetFields: event}) diff --git a/metricbeat/module/php_fpm/pool/pool.go b/metricbeat/module/php_fpm/pool/pool.go index ec15fb46915..cc322529863 100644 --- a/metricbeat/module/php_fpm/pool/pool.go +++ b/metricbeat/module/php_fpm/pool/pool.go @@ -19,8 +19,7 @@ package pool import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -58,16 +57,16 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { content, err := m.HTTP.FetchContent() if err != nil { - return errors.Wrap(err, "error in http fetch") + return fmt.Errorf("error in http fetch: %w", err) } var stats map[string]interface{} err = json.Unmarshal(content, &stats) if err != nil { - return errors.Wrap(err, "error unmarshalling json") + return
fmt.Errorf("error unmarshalling json: %w", err) } event, err := schema.Apply(stats) if err != nil { - return errors.Wrap(err, "error in event mapping") + return fmt.Errorf("error in event mapping: %w", err) } reporter.Event(mb.Event{ MetricSetFields: event, diff --git a/metricbeat/module/postgresql/activity/activity.go b/metricbeat/module/postgresql/activity/activity.go index ac2748cd582..b97830c8edc 100644 --- a/metricbeat/module/postgresql/activity/activity.go +++ b/metricbeat/module/postgresql/activity/activity.go @@ -19,8 +19,7 @@ package activity import ( "context" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/postgresql" @@ -60,7 +59,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { results, err := m.QueryStats(ctx, "SELECT * FROM pg_stat_activity") if err != nil { - return errors.Wrap(err, "error in QueryStats") + return fmt.Errorf("error in QueryStats: %w", err) } for _, result := range results { diff --git a/metricbeat/module/postgresql/bgwriter/bgwriter.go b/metricbeat/module/postgresql/bgwriter/bgwriter.go index bb7dd702b93..a33df10ae0f 100644 --- a/metricbeat/module/postgresql/bgwriter/bgwriter.go +++ b/metricbeat/module/postgresql/bgwriter/bgwriter.go @@ -21,8 +21,6 @@ import ( "context" "fmt" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/postgresql" ) @@ -57,7 +55,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { ctx := context.Background() results, err := m.QueryStats(ctx, "SELECT * FROM pg_stat_bgwriter") if err != nil { - return errors.Wrap(err, "error in QueryStats") + return fmt.Errorf("error in QueryStats: %w", err) } if len(results) == 0 { return fmt.Errorf("No results from the pg_stat_bgwriter query") diff --git a/metricbeat/module/postgresql/database/database.go b/metricbeat/module/postgresql/database/database.go index 0b23a25d27b..48fc032ad81 100644 --- a/metricbeat/module/postgresql/database/database.go +++ b/metricbeat/module/postgresql/database/database.go @@ -19,8 +19,7 @@ package database import ( "context" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/postgresql" @@ -59,7 +58,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { ctx := context.Background() results, err := m.QueryStats(ctx, "SELECT * FROM pg_stat_database") if err != nil { - return errors.Wrap(err, "error in QueryStats") + return fmt.Errorf("error in QueryStats: %w", err) } for _, result := range results { diff --git a/metricbeat/module/postgresql/docker-compose.yml b/metricbeat/module/postgresql/docker-compose.yml index acb591bfbfc..d29c19350a9 100644 --- a/metricbeat/module/postgresql/docker-compose.yml +++ b/metricbeat/module/postgresql/docker-compose.yml @@ -2,17 +2,18 @@ version: '2.3' services: postgresql: - image: docker.elastic.co/integrations-ci/beats-postgresql:${POSTGRESQL_VERSION:-13.2}-2 + image: docker.elastic.co/integrations-ci/beats-postgresql:${POSTGRESQL_VERSION:-13.11}-2 build: context: ./_meta args: - POSTGRESQL_VERSION: ${POSTGRESQL_VERSION:-13.2} + POSTGRESQL_VERSION: ${POSTGRESQL_VERSION:-13.11} environment: POSTGRES_PASSWORD: postgres ports: - 5432 healthcheck: - test: ["CMD-SHELL", "pg_isready -U postgres -p 5432"] - interval: 5s - timeout: 5s + test: ['CMD-SHELL', 'psql -h localhost -p 5432 -U postgres -c select 1 -d postgres'] + interval: 15s + timeout: 30s retries: 5 + start_period: 15s \ No newline at 
end of file diff --git a/metricbeat/module/postgresql/metricset.go b/metricbeat/module/postgresql/metricset.go index ca0a4e4a4d0..3ebcff4dafe 100644 --- a/metricbeat/module/postgresql/metricset.go +++ b/metricbeat/module/postgresql/metricset.go @@ -20,8 +20,7 @@ package postgresql import ( "context" "database/sql" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/elastic-agent-libs/logp" @@ -46,7 +45,7 @@ func (ms *MetricSet) DB(ctx context.Context) (*sql.Conn, error) { if ms.db == nil { db, err := sql.Open("postgres", ms.HostData().URI) if err != nil { - return nil, errors.Wrap(err, "failed to open connection") + return nil, fmt.Errorf("failed to open connection: %w", err) } ms.db = db } @@ -57,18 +56,18 @@ func (ms *MetricSet) DB(ctx context.Context) (*sql.Conn, error) { func (ms *MetricSet) QueryStats(ctx context.Context, query string) ([]map[string]interface{}, error) { db, err := ms.DB(ctx) if err != nil { - return nil, errors.Wrap(err, "failed to obtain a connection with the database") + return nil, fmt.Errorf("failed to obtain a connection with the database: %w", err) } defer db.Close() rows, err := db.QueryContext(ctx, query) if err != nil { - return nil, errors.Wrap(err, "failed to query database") + return nil, fmt.Errorf("failed to query database: %w", err) } columns, err := rows.Columns() if err != nil { - return nil, errors.Wrap(err, "scanning columns") + return nil, fmt.Errorf("scanning columns: %w", err) } vals := make([][]byte, len(columns)) valPointers := make([]interface{}, len(columns)) @@ -81,7 +80,7 @@ func (ms *MetricSet) QueryStats(ctx context.Context, query string) ([]map[string for rows.Next() { err = rows.Scan(valPointers...) if err != nil { - return nil, errors.Wrap(err, "scanning row") + return nil, fmt.Errorf("scanning row: %w", err) } result := map[string]interface{}{} @@ -100,5 +99,9 @@ func (ms *MetricSet) Close() error { if ms.db == nil { return nil } - return errors.Wrap(ms.db.Close(), "failed to close connection") + + if err := ms.db.Close(); err != nil { + return fmt.Errorf("failed to close connection: %w", err) + } + return nil } diff --git a/metricbeat/module/postgresql/statement/statement.go b/metricbeat/module/postgresql/statement/statement.go index d2ef253e8ad..e0677d1f522 100644 --- a/metricbeat/module/postgresql/statement/statement.go +++ b/metricbeat/module/postgresql/statement/statement.go @@ -19,8 +19,7 @@ package statement import ( "context" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/postgresql" @@ -61,7 +60,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { ctx := context.Background() results, err := m.QueryStats(ctx, "SELECT * FROM pg_stat_statements") if err != nil { - return errors.Wrap(err, "QueryStats") + return fmt.Errorf("QueryStats: %w", err) } for _, result := range results { diff --git a/metricbeat/module/prometheus/collector/_meta/data.json b/metricbeat/module/prometheus/collector/_meta/data.json index a46b63c74fe..dba9f7771c4 100644 --- a/metricbeat/module/prometheus/collector/_meta/data.json +++ b/metricbeat/module/prometheus/collector/_meta/data.json @@ -11,10 +11,12 @@ }, "prometheus": { "labels": { - "job": "prometheus" + "job": "prometheus", + "listener_name": "http" }, "metrics": { - "up": 1 + "net_conntrack_listener_conn_accepted_total": 3, + "net_conntrack_listener_conn_closed_total": 0 } }, "service": { diff --git a/metricbeat/module/rabbitmq/connection/connection.go 
b/metricbeat/module/rabbitmq/connection/connection.go index 80acc279e0f..d23c492be6c 100644 --- a/metricbeat/module/rabbitmq/connection/connection.go +++ b/metricbeat/module/rabbitmq/connection/connection.go @@ -18,7 +18,7 @@ package connection import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/rabbitmq" @@ -50,7 +50,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { content, err := m.HTTP.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } return eventsMapping(content, r) diff --git a/metricbeat/module/rabbitmq/connection/data.go b/metricbeat/module/rabbitmq/connection/data.go index 3d92544431c..7aaa0ad04db 100644 --- a/metricbeat/module/rabbitmq/connection/data.go +++ b/metricbeat/module/rabbitmq/connection/data.go @@ -19,8 +19,7 @@ package connection import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -64,7 +63,7 @@ func eventsMapping(content []byte, r mb.ReporterV2) error { var connections []map[string]interface{} err := json.Unmarshal(content, &connections) if err != nil { - return errors.Wrap(err, "error in unmarshal") + return fmt.Errorf("error in unmarshal: %w", err) } for _, node := range connections { diff --git a/metricbeat/module/rabbitmq/exchange/data.go b/metricbeat/module/rabbitmq/exchange/data.go index e181c76a3a2..77d43fcab1d 100644 --- a/metricbeat/module/rabbitmq/exchange/data.go +++ b/metricbeat/module/rabbitmq/exchange/data.go @@ -19,8 +19,7 @@ package exchange import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -59,7 +58,7 @@ func eventsMapping(content []byte, r mb.ReporterV2) error { var exchanges []map[string]interface{} err := json.Unmarshal(content, &exchanges) if err != nil { - return errors.Wrap(err, "error in unmarshal") + return fmt.Errorf("error in unmarshal: %w", err) } for _, exchange := range exchanges { diff --git a/metricbeat/module/rabbitmq/exchange/exchange.go b/metricbeat/module/rabbitmq/exchange/exchange.go index 69a07d316a9..1e1779b18ca 100644 --- a/metricbeat/module/rabbitmq/exchange/exchange.go +++ b/metricbeat/module/rabbitmq/exchange/exchange.go @@ -18,7 +18,7 @@ package exchange import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/rabbitmq" @@ -52,7 +52,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { content, err := m.HTTP.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } return eventsMapping(content, r) diff --git a/metricbeat/module/rabbitmq/node/data.go b/metricbeat/module/rabbitmq/node/data.go index 60cedb40ec0..bbf24e3fd59 100644 --- a/metricbeat/module/rabbitmq/node/data.go +++ b/metricbeat/module/rabbitmq/node/data.go @@ -19,8 +19,7 @@ package node import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -151,7 +150,7 @@ func eventsMapping(r mb.ReporterV2, content []byte) error { var nodes []map[string]interface{} err := json.Unmarshal(content, &nodes) if err != nil { - return errors.Wrap(err, "error in Unmarshal") + return 
fmt.Errorf("error in Unmarshal: %w", err) } for _, node := range nodes { diff --git a/metricbeat/module/rabbitmq/node/node.go b/metricbeat/module/rabbitmq/node/node.go index cbb575129cb..8c409ec991e 100644 --- a/metricbeat/module/rabbitmq/node/node.go +++ b/metricbeat/module/rabbitmq/node/node.go @@ -19,8 +19,7 @@ package node import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/rabbitmq" @@ -66,7 +65,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return &ClusterMetricSet{ms}, nil default: - return nil, errors.Errorf("incorrect node.collect: %s", config.Collect) + return nil, fmt.Errorf("incorrect node.collect: %s", config.Collect) } } @@ -83,7 +82,7 @@ func (m *MetricSet) fetchOverview() (*apiOverview, error) { var apiOverview apiOverview err = json.Unmarshal(d, &apiOverview) if err != nil { - return nil, errors.Wrap(err, string(d)) + return nil, fmt.Errorf(string(d)+": %d", err) } return &apiOverview, nil } @@ -92,17 +91,17 @@ func (m *MetricSet) fetchOverview() (*apiOverview, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { o, err := m.fetchOverview() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } node, err := rabbitmq.NewMetricSet(m.BaseMetricSet, rabbitmq.NodesPath+"/"+o.Node) if err != nil { - return errors.Wrap(err, "error creating new metricset") + return fmt.Errorf("error creating new metricset: %w", err) } content, err := node.HTTP.FetchJSON() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } evt := eventMapping(content) @@ -114,7 +113,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { func (m *ClusterMetricSet) Fetch(r mb.ReporterV2) error { content, err := m.HTTP.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } return eventsMapping(r, content) diff --git a/metricbeat/module/rabbitmq/queue/data.go b/metricbeat/module/rabbitmq/queue/data.go index 70529b5f219..682abcbc194 100644 --- a/metricbeat/module/rabbitmq/queue/data.go +++ b/metricbeat/module/rabbitmq/queue/data.go @@ -19,8 +19,7 @@ package queue import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/elastic-agent-libs/mapstr" @@ -88,7 +87,7 @@ func eventsMapping(content []byte, r mb.ReporterV2) error { var queues []map[string]interface{} err := json.Unmarshal(content, &queues) if err != nil { - return errors.Wrap(err, "error in mapping") + return fmt.Errorf("error in mapping: %w", err) } for _, queue := range queues { diff --git a/metricbeat/module/rabbitmq/queue/queue.go b/metricbeat/module/rabbitmq/queue/queue.go index 2dc1afcb9a0..6a4e40ed3bd 100644 --- a/metricbeat/module/rabbitmq/queue/queue.go +++ b/metricbeat/module/rabbitmq/queue/queue.go @@ -18,7 +18,7 @@ package queue import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/rabbitmq" @@ -50,7 +50,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { content, err := m.HTTP.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } return eventsMapping(content, r) diff --git a/metricbeat/module/redis/info/info.go b/metricbeat/module/redis/info/info.go index f292e10e852..bef98dd9f1a 100644 --- a/metricbeat/module/redis/info/info.go +++ 
b/metricbeat/module/redis/info/info.go @@ -18,10 +18,9 @@ package info import ( + "fmt" "strconv" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" "github.com/elastic/beats/v7/metricbeat/module/redis" @@ -45,7 +44,7 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { ms, err := redis.NewMetricSet(base) if err != nil { - return nil, errors.Wrap(err, "failed to create 'info' metricset") + return nil, fmt.Errorf("failed to create 'info' metricset: %w", err) } return &MetricSet{ms}, nil } @@ -55,14 +54,14 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { conn := m.Connection() defer func() { if err := conn.Close(); err != nil { - m.Logger().Debug(errors.Wrapf(err, "failed to release connection")) + m.Logger().Debug(fmt.Errorf("failed to release connection: %w", err)) } }() // Fetch all INFO. info, err := redis.FetchRedisInfo("all", conn) if err != nil { - return errors.Wrap(err, "failed to fetch redis info") + return fmt.Errorf("failed to fetch redis info: %w", err) } // In 5.0 some fields are renamed, maintain both names, old ones will be deprecated @@ -81,7 +80,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { slowLogLength, err := redis.FetchSlowLogLength(conn) if err != nil { - return errors.Wrap(err, "failed to fetch slow log length") + return fmt.Errorf("failed to fetch slow log length: %w", err) } info["slowlog_len"] = strconv.FormatInt(slowLogLength, 10) diff --git a/metricbeat/module/redis/key/key.go b/metricbeat/module/redis/key/key.go index c01fe25a318..32b82bcc2ef 100644 --- a/metricbeat/module/redis/key/key.go +++ b/metricbeat/module/redis/key/key.go @@ -20,8 +20,6 @@ package key import ( "fmt" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" "github.com/elastic/beats/v7/metricbeat/module/redis" @@ -55,12 +53,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { }{} err := base.Module().UnpackConfig(&config) if err != nil { - return nil, errors.Wrap(err, "failed to read configuration for 'key' metricset") + return nil, fmt.Errorf("failed to read configuration for 'key' metricset: %w", err) } ms, err := redis.NewMetricSet(base) if err != nil { - return nil, errors.Wrap(err, "failed to create 'key' metricset") + return nil, fmt.Errorf("failed to create 'key' metricset: %w", err) } return &MetricSet{ @@ -74,7 +72,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { conn := m.Connection() defer func() { if err := conn.Close(); err != nil { - m.Logger().Debug(errors.Wrapf(err, "failed to release connection")) + m.Logger().Debug(fmt.Errorf("failed to release connection: %w", err)) } }() @@ -86,7 +84,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { keyspace = *p.Keyspace } if err := redis.Select(conn, keyspace); err != nil { - msg := errors.Wrapf(err, "Failed to select keyspace %d", keyspace) + msg := fmt.Errorf("Failed to select keyspace %d: %w", keyspace, err) m.Logger().Error(msg) r.Error(err) continue @@ -94,7 +92,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { keys, err := redis.FetchKeys(conn, p.Pattern, p.Limit) if err != nil { - msg := errors.Wrapf(err, "Failed to list keys in keyspace %d with pattern '%s'", keyspace, p.Pattern) + msg := fmt.Errorf("Failed to list keys in keyspace %d with pattern '%s': %w", keyspace, p.Pattern, err) m.Logger().Error(msg) r.Error(err) continue diff --git a/metricbeat/module/redis/keyspace/keyspace.go 
b/metricbeat/module/redis/keyspace/keyspace.go index a9710fa67f9..afa6a485fc3 100644 --- a/metricbeat/module/redis/keyspace/keyspace.go +++ b/metricbeat/module/redis/keyspace/keyspace.go @@ -18,7 +18,7 @@ package keyspace import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -43,7 +43,7 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { ms, err := redis.NewMetricSet(base) if err != nil { - return nil, errors.Wrap(err, "failed to create 'keyspace' metricset") + return nil, fmt.Errorf("failed to create 'keyspace' metricset: %w", err) } return &MetricSet{ms}, nil } @@ -53,14 +53,14 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { conn := m.Connection() defer func() { if err := conn.Close(); err != nil { - m.Logger().Debug(errors.Wrapf(err, "failed to release connection")) + m.Logger().Debug(fmt.Errorf("failed to release connection: %w", err)) } }() // Fetch default INFO. info, err := redis.FetchRedisInfo("keyspace", conn) if err != nil { - return errors.Wrap(err, "Failed to fetch redis info for keyspaces") + return fmt.Errorf("Failed to fetch redis info for keyspaces: %w", err) } m.Logger().Debugf("Redis INFO from %s: %+v", m.Host(), info) diff --git a/metricbeat/module/system/core/config.go b/metricbeat/module/system/core/config.go index 09b1b25f7ed..8ac1d2f9575 100644 --- a/metricbeat/module/system/core/config.go +++ b/metricbeat/module/system/core/config.go @@ -18,10 +18,10 @@ package core import ( + "errors" + "fmt" "strings" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/common/cfgwarn" metrics "github.com/elastic/elastic-agent-system-metrics/metric/cpu" ) @@ -56,7 +56,7 @@ func (c Config) Validate() (metrics.MetricOpts, error) { case ticks: opts.Ticks = true default: - return opts, errors.Errorf("invalid core.metrics value '%v' (valid "+ + return opts, fmt.Errorf("invalid core.metrics value '%v' (valid "+ "options are %v and %v)", metric, percentages, ticks) } } diff --git a/metricbeat/module/system/cpu/config.go b/metricbeat/module/system/cpu/config.go index 9533bf7b296..ef9d78fe0ce 100644 --- a/metricbeat/module/system/cpu/config.go +++ b/metricbeat/module/system/cpu/config.go @@ -18,10 +18,10 @@ package cpu import ( + "errors" + "fmt" "strings" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/common/cfgwarn" metrics "github.com/elastic/elastic-agent-system-metrics/metric/cpu" ) @@ -59,7 +59,7 @@ func (c Config) Validate() (metrics.MetricOpts, error) { case ticks: opts.Ticks = true default: - return opts, errors.Errorf("invalid cpu.metrics value '%v' (valid "+ + return opts, fmt.Errorf("invalid cpu.metrics value '%v' (valid "+ "options are %v, %v, and %v)", metric, percentages, normalizedPercentages, ticks) } diff --git a/metricbeat/module/system/entropy/entropy.go b/metricbeat/module/system/entropy/entropy.go index 5bc83210eab..a9ea007af69 100644 --- a/metricbeat/module/system/entropy/entropy.go +++ b/metricbeat/module/system/entropy/entropy.go @@ -20,12 +20,11 @@ package entropy import ( + "fmt" "io/ioutil" "strconv" "strings" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/elastic-agent-libs/mapstr" @@ -68,11 +67,11 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(report mb.ReporterV2) error { entropy, err := getEntropyData(m.mod.ResolveHostFS("/proc/sys/kernel/random/entropy_avail")) if err != 
nil { - return errors.Wrap(err, "error getting entropy") + return fmt.Errorf("error getting entropy: %w", err) } poolsize, err := getEntropyData(m.mod.ResolveHostFS("/proc/sys/kernel/random/poolsize")) if err != nil { - return errors.Wrap(err, "error getting poolsize") + return fmt.Errorf("error getting poolsize: %w", err) } report.Event(mb.Event{ MetricSetFields: mapstr.M{ @@ -88,12 +87,12 @@ func getEntropyData(path string) (int, error) { //This will be a number in the range 0 to 4096. raw, err := ioutil.ReadFile(path) if err != nil { - return 0, errors.Wrap(err, "error reading from random") + return 0, fmt.Errorf("error reading from random: %w", err) } intval, err := strconv.ParseInt(strings.TrimSpace(string(raw)), 10, 64) if err != nil { - return 0, errors.Wrap(err, "error parsing from random") + return 0, fmt.Errorf("error parsing from random: %w", err) } return int(intval), nil diff --git a/metricbeat/module/system/filesystem/_meta/docs.asciidoc b/metricbeat/module/system/filesystem/_meta/docs.asciidoc index 94e97806448..691f9943e31 100644 --- a/metricbeat/module/system/filesystem/_meta/docs.asciidoc +++ b/metricbeat/module/system/filesystem/_meta/docs.asciidoc @@ -17,7 +17,7 @@ not be collected from filesystems matching these types. This setting also affects the `fsstats` metricset. If this option is not set, metricbeat ignores all types for virtual devices in systems where this information is available (e.g. all types marked as `nodev` in `/proc/filesystems` in Linux systems). This can be set to an empty list (`[]`) -to make filebeat report all filesystems, regardless of type. +to make metricbeat report all filesystems, regardless of type. [float] === Filtering diff --git a/metricbeat/module/system/load/load.go b/metricbeat/module/system/load/load.go index c674a5dc819..a28571124f1 100644 --- a/metricbeat/module/system/load/load.go +++ b/metricbeat/module/system/load/load.go @@ -20,7 +20,7 @@ package load import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -52,7 +52,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { load, err := cpu.Load() if err != nil { - return errors.Wrap(err, "failed to get CPU load values") + return fmt.Errorf("failed to get CPU load values: %w", err) } avgs := load.Averages() diff --git a/metricbeat/module/system/network/network.go b/metricbeat/module/system/network/network.go index 1e153d082a5..97700ba66f5 100644 --- a/metricbeat/module/system/network/network.go +++ b/metricbeat/module/system/network/network.go @@ -20,6 +20,8 @@ package network import ( + "fmt" + "math" "strings" "github.com/elastic/beats/v7/metricbeat/mb" @@ -27,7 +29,6 @@ import ( "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" - "github.com/pkg/errors" "github.com/shirou/gopsutil/v3/net" ) @@ -43,16 +44,17 @@ func init() { // MetricSet for fetching system network IO metrics. 
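+// State is kept per interface: prevInterfaceCounter holds the last raw sample
+// for each interface and currentGaugeCounter the gauge derived from it, so a
+// counter rollover on one interface cannot skew the totals for the others.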
type MetricSet struct { mb.BaseMetricSet - interfaces map[string]struct{} - prevCounters networkCounter + interfaces map[string]struct{} + prevInterfaceCounter map[string]networkCounter + currentGaugeCounter map[string]networkCounter } // networkCounter stores previous network counter values for calculating gauges in next collection type networkCounter struct { - prevNetworkInBytes uint64 - prevNetworkInPackets uint64 - prevNetworkOutBytes uint64 - prevNetworkOutPackets uint64 + NetworkInBytes uint64 + NetworkInPackets uint64 + NetworkOutBytes uint64 + NetworkOutPackets uint64 } // New is a mb.MetricSetFactory that returns a new MetricSet. @@ -76,9 +78,10 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { } return &MetricSet{ - BaseMetricSet: base, - interfaces: interfaceSet, - prevCounters: networkCounter{}, + BaseMetricSet: base, + interfaces: interfaceSet, + prevInterfaceCounter: map[string]networkCounter{}, + currentGaugeCounter: map[string]networkCounter{}, }, nil } @@ -86,11 +89,9 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { stats, err := net.IOCounters(true) if err != nil { - return errors.Wrap(err, "network io counters") + return fmt.Errorf("network io counters: %w", err) } - var networkInBytes, networkOutBytes, networkInPackets, networkOutPackets uint64 - for _, counters := range stats { if m.interfaces != nil { // Select stats by interface name. @@ -104,30 +105,60 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { MetricSetFields: ioCountersToMapStr(counters), }) - // accumulate values from all interfaces - networkInBytes += counters.BytesRecv - networkOutBytes += counters.BytesSent - networkInPackets += counters.PacketsRecv - networkOutPackets += counters.PacketsSent + // sum the values at a per-interface level + // Makes us less likely to overload a value somewhere. 
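+		// The first sample seen for an interface only records a baseline (note
+		// the continue below); gauges for that interface start on the next fetch.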
+ prevCounters, ok := m.prevInterfaceCounter[counters.Name] + if !ok { + m.prevInterfaceCounter[counters.Name] = networkCounter{ + NetworkInBytes: counters.BytesRecv, + NetworkInPackets: counters.PacketsRecv, + NetworkOutBytes: counters.BytesSent, + NetworkOutPackets: counters.PacketsSent, + } + continue + } + // create current set of gauges + currentDiff := networkCounter{ + NetworkInBytes: createGaugeWithRollover(counters.BytesRecv, prevCounters.NetworkInBytes), + NetworkInPackets: createGaugeWithRollover(counters.PacketsRecv, prevCounters.NetworkInPackets), + NetworkOutBytes: createGaugeWithRollover(counters.BytesSent, prevCounters.NetworkOutBytes), + NetworkOutPackets: createGaugeWithRollover(counters.PacketsSent, prevCounters.NetworkOutPackets), + } + + m.currentGaugeCounter[counters.Name] = currentDiff + + m.prevInterfaceCounter[counters.Name] = networkCounter{ + NetworkInBytes: counters.BytesRecv, + NetworkInPackets: counters.PacketsRecv, + NetworkOutBytes: counters.BytesSent, + NetworkOutPackets: counters.PacketsSent, + } if !isOpen { return nil } } - if m.prevCounters != (networkCounter{}) { - // convert network metrics from counters to gauges + if len(m.currentGaugeCounter) != 0 { + + var totalNetworkInBytes, totalNetworkInPackets, totalNetworkOutBytes, totalNetworkOutPackets uint64 + for _, iface := range m.currentGaugeCounter { + totalNetworkInBytes += iface.NetworkInBytes + totalNetworkInPackets += iface.NetworkInPackets + totalNetworkOutBytes += iface.NetworkOutBytes + totalNetworkOutPackets += iface.NetworkOutPackets + } r.Event(mb.Event{ RootFields: mapstr.M{ "host": mapstr.M{ "network": mapstr.M{ "ingress": mapstr.M{ - "bytes": networkInBytes - m.prevCounters.prevNetworkInBytes, - "packets": networkInPackets - m.prevCounters.prevNetworkInPackets, + "bytes": totalNetworkInBytes, + "packets": totalNetworkInPackets, }, "egress": mapstr.M{ - "bytes": networkOutBytes - m.prevCounters.prevNetworkOutBytes, - "packets": networkOutPackets - m.prevCounters.prevNetworkOutPackets, + "bytes": totalNetworkOutBytes, + "packets": totalNetworkOutPackets, }, }, }, @@ -135,15 +166,43 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { }) } - // update prevCounters - m.prevCounters.prevNetworkInBytes = networkInBytes - m.prevCounters.prevNetworkInPackets = networkInPackets - m.prevCounters.prevNetworkOutBytes = networkOutBytes - m.prevCounters.prevNetworkOutPackets = networkOutPackets - return nil } +// Create a gauged difference between two numbers, taking into account rollover that might happen, and the current number might be lower. +// The /proc/net/dev interface is defined in net/core/net-procfs.c, +// where it prints the data from rtnl_link_stats64 defined in uapi/linux/if_link.h. +// There's an extra bit of logic here: the underlying network device object in the kernel, net_device, +// can define either ndo_get_stats64() or ndo_get_stats() as a metrics callback, with the latter returning an unsigned long (32 bit) set of metrics. +// See dev_get_stats() in net/core/dev.c for context. The exact implementation depends upon the network driver. +// For example, the tg3 network driver used by the broadcom network controller on my dev machine +// uses 64 bit metrics, defined in the drivers/net/ethernet/broadcom/tg3.h, +// with the ndo_get_stats64() callback defined in net/ethernet/broadcom/tg3.c. +// Long story short, we can't be completely sure if we're rolling over at max_u32 or max_u64. +// if the previous value was > max_u32, do math assuming we've rolled over at max_u64. 
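+// Worked example, matching the rollover32 testdata used by the tests below: a
+// counter reads 4294966595 and then 400. Since 4294966595 <= MaxUint32 we assume
+// a 32-bit counter: remaining = 4294967295 - 4294966595 = 700, so the gauge is
+// 400 + 700 + 1 = 1101.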
+// On windows: This uses GetIfEntry: https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2 which uses ulong64. +// On Darwin we just call netstat. +// I'm assuming rollover behavior is similar. +func createGaugeWithRollover(current uint64, prev uint64) uint64 { + // base case: no rollover + if current >= prev { + return current - prev + } + + // case: rollover + // case: we rolled over at 64 bits + if prev > math.MaxUint32 { + debugf("Warning: Rollover 64 bit gauge detected. Current value: %d, previous: %d", current, prev) + remaining := math.MaxUint64 - prev + return current + remaining + 1 // the +1 counts the actual "rollover" increment. + } + // case: we rolled over at 32 bits + debugf("Warning: Rollover 32 bit gauge detected. Current value: %d, previous: %d", current, prev) + remaining := math.MaxUint32 - prev + return current + remaining + 1 + +} + func ioCountersToMapStr(counters net.IOCountersStat) mapstr.M { return mapstr.M{ "name": counters.Name, diff --git a/metricbeat/module/system/network/network_test.go b/metricbeat/module/system/network/network_test.go index 9337ff36d0c..a986d989e2e 100644 --- a/metricbeat/module/system/network/network_test.go +++ b/metricbeat/module/system/network/network_test.go @@ -20,10 +20,16 @@ package network import ( + "math" + "os" + "path/filepath" + "runtime" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/elastic/beats/v7/metricbeat/mb" mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" _ "github.com/elastic/beats/v7/metricbeat/module/system" ) @@ -40,6 +46,162 @@ func TestFetch(t *testing.T) { events[0].BeatEvent("system", "network").Fields.StringToPrint()) } +func TestNormalHostMetrics(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip("test requires linux") + } + basePath, err := os.Getwd() + require.NoError(t, err) + + reporter := mbtest.NewReportingMetricSetV2Error(t, getConfig()) + + firstProc := filepath.Join(basePath, "/tests/testdata/proc/") + err = os.Setenv("HOST_PROC", firstProc) + require.NoError(t, err) + + // get initial metrics + _, errs := mbtest.ReportingFetchV2Error(reporter) + require.Empty(t, errs) + + // second event + secondProc := filepath.Join(basePath, "/tests/testdata2/proc/") + err = os.Setenv("HOST_PROC", secondProc) + require.NoError(t, err) + + events, errs := mbtest.ReportingFetchV2Error(reporter) + require.Empty(t, errs) + found, evt := findRootEvent(events) + require.True(t, found) + + t.Logf("second event: %+v", evt.RootFields.StringToPrint()) + + // check second values + ingressBytes, err := evt.RootFields.GetValue("host.network.ingress.bytes") + require.NoError(t, err) + require.Equal(t, uint64(110), ingressBytes) + + ingressPackets, err := evt.RootFields.GetValue("host.network.ingress.packets") + require.NoError(t, err) + require.Equal(t, uint64(200000), ingressPackets) + + egressPackets, err := evt.RootFields.GetValue("host.network.egress.packets") + require.NoError(t, err) + require.Equal(t, uint64(200001), egressPackets) + + egressBytes, err := evt.RootFields.GetValue("host.network.egress.bytes") + require.NoError(t, err) + require.Equal(t, uint64(30100), egressBytes) +} + +func TestRollover(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip("test requires linux") + } + basePath, err := os.Getwd() + require.NoError(t, err) + + reporter := mbtest.NewReportingMetricSetV2Error(t, getConfig()) + + firstProc := filepath.Join(basePath, "/tests/rollover/proc/") + err = os.Setenv("HOST_PROC", firstProc) + 
require.NoError(t, err) + + _, errs := mbtest.ReportingFetchV2Error(reporter) + require.Empty(t, errs) + + secondProc := filepath.Join(basePath, "/tests/rollover2/proc/") + err = os.Setenv("HOST_PROC", secondProc) + require.NoError(t, err) + + events, errs := mbtest.ReportingFetchV2Error(reporter) + require.Empty(t, errs) + found, evt := findRootEvent(events) + require.True(t, found) + + t.Logf("second event: %+v", evt.RootFields.StringToPrint()) + + ingressBytes, err := evt.RootFields.GetValue("host.network.ingress.bytes") + require.NoError(t, err) + require.Equal(t, uint64(601), ingressBytes) + + egressBytes, err := evt.RootFields.GetValue("host.network.egress.bytes") + require.NoError(t, err) + require.Equal(t, uint64(902), egressBytes) +} + +func TestRollover32(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip("test requires linux") + } + basePath, err := os.Getwd() + require.NoError(t, err) + + reporter := mbtest.NewReportingMetricSetV2Error(t, getConfig()) + + firstProc := filepath.Join(basePath, "/tests/rollover32/proc/") + err = os.Setenv("HOST_PROC", firstProc) + require.NoError(t, err) + + _, errs := mbtest.ReportingFetchV2Error(reporter) + require.Empty(t, errs) + + secondProc := filepath.Join(basePath, "/tests/rollover32_2/proc/") + err = os.Setenv("HOST_PROC", secondProc) + require.NoError(t, err) + + events, errs := mbtest.ReportingFetchV2Error(reporter) + require.Empty(t, errs) + found, evt := findRootEvent(events) + require.True(t, found) + + t.Logf("second event: %+v", evt.RootFields.StringToPrint()) + + egressBytes, err := evt.RootFields.GetValue("host.network.egress.bytes") + require.NoError(t, err) + require.Equal(t, uint64(3037888886), egressBytes) + + ingressBytes, err := evt.RootFields.GetValue("host.network.ingress.bytes") + require.NoError(t, err) + require.Equal(t, uint64(1101), ingressBytes) +} + +func TestGauge(t *testing.T) { + var prevu32 uint64 = math.MaxUint32 - 10 + var currentu32 uint64 = 10 + + resultu32 := createGaugeWithRollover(currentu32, prevu32) + require.Equal(t, uint64(21), resultu32) + + var prevNoRollover uint64 = 347458374592 + var currentNoRollover = prevNoRollover + 3452 + resultNoRollover := createGaugeWithRollover(currentNoRollover, prevNoRollover) + require.Equal(t, uint64(3452), resultNoRollover) + + var prevu64 uint64 = math.MaxUint64 - 5000 + var currentu64 uint64 = 32 + resultu64 := createGaugeWithRollover(currentu64, prevu64) + require.Equal(t, uint64(5033), resultu64) +} + +func TestGaugeRolloverIncrement(t *testing.T) { + // test to see if we're properly incrementing when we rollover + // i.e do we count the increment from MAX_INT to 0? 
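+	// prev = MaxUint64 and current = 0, so remaining = MaxUint64 - prev = 0 and
+	// the gauge must read 0 + 0 + 1 = 1: the rollover step itself is the increment.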
+ var prevU64 uint64 = math.MaxUint64 + current := uint64(0) + + resultu32 := createGaugeWithRollover(current, prevU64) + require.Equal(t, uint64(1), resultu32) +} + +func findRootEvent(events []mb.Event) (bool, mb.Event) { + for _, evt := range events { + if len(evt.RootFields) > 0 { + return true, evt + } + } + return false, mb.Event{} +} + func TestData(t *testing.T) { f := mbtest.NewReportingMetricSetV2Error(t, getConfig()) err := mbtest.WriteEventsReporterV2Error(f, t, ".") diff --git a/metricbeat/module/system/network/tests/rollover/proc/net/dev b/metricbeat/module/system/network/tests/rollover/proc/net/dev new file mode 100644 index 00000000000..9615d2be029 --- /dev/null +++ b/metricbeat/module/system/network/tests/rollover/proc/net/dev @@ -0,0 +1,19 @@ +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed + lo: 1755060487 7078052 0 0 0 0 0 0 1755090578 7178052 0 0 0 0 0 0 + eno1: 19784529479 34542897 0 10 0 0 0 8323480 18446744073709550915 22626696 0 0 0 0 0 0 + eno2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eno3: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eno4: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +br-b47241fe51cb: 5120 82 0 0 0 0 0 13 2740 26 0 0 0 0 0 0 +docker0: 12103120 25910 0 0 0 0 0 0 22016528 36782 0 0 0 0 0 0 +veth9b054b1: 0 0 0 0 0 0 0 0 39236 509 0 0 0 0 0 0 +vboxnet0: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +vboxnet1: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +vboxnet2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +br-35e03ac12f61: 89956121 334556 0 0 0 0 0 0 52243734 140194 0 0 0 0 0 0 +vethb4fe756: 936385 1467 0 0 0 0 0 0 434283 1377 0 0 0 0 0 0 +vethdb936ec: 6617151565 13069604 0 0 0 0 0 0 21159216788 17045495 0 0 0 0 0 0 +vethad845c5: 11372445599 12688613 0 0 0 0 0 0 3863963160 9807427 0 0 0 0 0 0 +veth1bb2863: 2309909319 2277661 0 0 0 0 0 0 1590377742 1850698 0 0 0 0 0 0 +vethf0ccf73: 18446744073709551115 2681033 0 0 0 0 0 0 1257078810 2119145 0 0 0 0 0 0 \ No newline at end of file diff --git a/metricbeat/module/system/network/tests/rollover2/proc/net/dev b/metricbeat/module/system/network/tests/rollover2/proc/net/dev new file mode 100644 index 00000000000..096271c2c55 --- /dev/null +++ b/metricbeat/module/system/network/tests/rollover2/proc/net/dev @@ -0,0 +1,19 @@ +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed + lo: 1755060487 7078052 0 0 0 0 0 0 1755090579 7178052 0 0 0 0 0 0 + eno1: 19784529479 34542897 0 10 0 0 0 8323480 200 22626696 0 0 0 0 0 0 + eno2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eno3: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eno4: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +br-b47241fe51cb: 5120 82 0 0 0 0 0 13 2740 26 0 0 0 0 0 0 +docker0: 12103120 25910 0 0 0 0 0 0 22016528 36782 0 0 0 0 0 0 +veth9b054b1: 0 0 0 0 0 0 0 0 39236 509 0 0 0 0 0 0 +vboxnet0: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +vboxnet1: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +vboxnet2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +br-35e03ac12f61: 89956121 334556 0 0 0 0 0 0 52243734 140194 0 0 0 0 0 0 +vethb4fe756: 936385 1467 0 0 0 0 0 0 434283 1377 0 0 0 0 0 0 +vethdb936ec: 6617151565 13069604 0 0 0 0 0 0 21159216788 17045495 0 0 0 0 0 0 +vethad845c5: 11372445599 12688613 0 0 0 0 0 0 3863963160 9807427 0 0 0 0 0 0 +veth1bb2863: 2309909319 2277661 0 0 0 0 0 0 1590377742 1850698 0 0 0 0 0 0 +vethf0ccf73: 100 2681033 0 0 0 0 0 0 1257078810 2119145 0 0 0 0 0 0 \ No newline at end of file diff --git a/metricbeat/module/system/network/tests/rollover32/proc/net/dev 
b/metricbeat/module/system/network/tests/rollover32/proc/net/dev new file mode 100644 index 00000000000..9527149e7e6 --- /dev/null +++ b/metricbeat/module/system/network/tests/rollover32/proc/net/dev @@ -0,0 +1,19 @@ +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed + lo: 1755060487 7078052 0 0 0 0 0 0 1755090578 7178052 0 0 0 0 0 0 + eno1: 19784529479 34542897 0 10 0 0 0 8323480 18446744073709550915 22626696 0 0 0 0 0 0 + eno2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eno3: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eno4: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +br-b47241fe51cb: 5120 82 0 0 0 0 0 13 2740 26 0 0 0 0 0 0 +docker0: 12103120 25910 0 0 0 0 0 0 22016528 36782 0 0 0 0 0 0 +veth9b054b1: 0 0 0 0 0 0 0 0 39236 509 0 0 0 0 0 0 +vboxnet0: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +vboxnet1: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +vboxnet2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +br-35e03ac12f61: 89956121 334556 0 0 0 0 0 0 52243734 140194 0 0 0 0 0 0 +vethb4fe756: 936385 1467 0 0 0 0 0 0 434283 1377 0 0 0 0 0 0 +vethdb936ec: 6617151565 13069604 0 0 0 0 0 0 21159216788 17045495 0 0 0 0 0 0 +vethad845c5: 11372445599 12688613 0 0 0 0 0 0 3863963160 9807427 0 0 0 0 0 0 +veth1bb2863: 2309909319 2277661 0 0 0 0 0 0 1590377742 1850698 0 0 0 0 0 0 +vethf0ccf73: 4294966595 2681033 0 0 0 0 0 0 1257078810 2119145 0 0 0 0 0 0 \ No newline at end of file diff --git a/metricbeat/module/system/network/tests/rollover32_2/proc/net/dev b/metricbeat/module/system/network/tests/rollover32_2/proc/net/dev new file mode 100644 index 00000000000..2dd56e0f092 --- /dev/null +++ b/metricbeat/module/system/network/tests/rollover32_2/proc/net/dev @@ -0,0 +1,19 @@ +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed + lo: 1755060487 7078052 0 0 0 0 0 0 1755090578 7178052 0 0 0 0 0 0 + eno1: 19784529479 34542897 0 10 0 0 0 8323480 18446744073709550915 22626696 0 0 0 0 0 0 + eno2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eno3: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eno4: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +br-b47241fe51cb: 5120 82 0 0 0 0 0 13 2740 26 0 0 0 0 0 0 +docker0: 12103120 25910 0 0 0 0 0 0 22016528 36782 0 0 0 0 0 0 +veth9b054b1: 0 0 0 0 0 0 0 0 39236 509 0 0 0 0 0 0 +vboxnet0: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +vboxnet1: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +vboxnet2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +br-35e03ac12f61: 89956121 334556 0 0 0 0 0 0 52243734 140194 0 0 0 0 0 0 +vethb4fe756: 936385 1467 0 0 0 0 0 0 434283 1377 0 0 0 0 0 0 +vethdb936ec: 6617151565 13069604 0 0 0 0 0 0 21159216788 17045495 0 0 0 0 0 0 +vethad845c5: 11372445599 12688613 0 0 0 0 0 0 3863963160 9807427 0 0 0 0 0 0 +veth1bb2863: 2309909319 2277661 0 0 0 0 0 0 1590377742 1850698 0 0 0 0 0 0 +vethf0ccf73: 400 2681033 0 0 0 0 0 0 400 2119145 0 0 0 0 0 0 \ No newline at end of file diff --git a/metricbeat/module/system/network/tests/testdata/proc/net/dev b/metricbeat/module/system/network/tests/testdata/proc/net/dev new file mode 100644 index 00000000000..0134ed8b80f --- /dev/null +++ b/metricbeat/module/system/network/tests/testdata/proc/net/dev @@ -0,0 +1,19 @@ +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed + lo: 1755060477 7078052 0 0 0 0 0 0 1755060477 7078052 0 0 0 0 0 0 + eno1: 19784529479 34542897 0 10 0 0 0 8323480 23923097145 22626696 0 0 0 0 0 0 + eno2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eno3: 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 + eno4: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +br-b47241fe51cb: 5120 82 0 0 0 0 0 13 2740 26 0 0 0 0 0 0 +docker0: 12103120 25910 0 0 0 0 0 0 22016528 36782 0 0 0 0 0 0 +veth9b054b1: 0 0 0 0 0 0 0 0 39236 509 0 0 0 0 0 0 +vboxnet0: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +vboxnet1: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +vboxnet2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +br-35e03ac12f61: 89956121 134556 0 0 0 0 0 0 52243734 140194 0 0 0 0 0 0 +vethb4fe756: 936385 1467 0 0 0 0 0 0 434283 1377 0 0 0 0 0 0 +vethdb936ec: 6617151565 13069604 0 0 0 0 0 0 21159216788 17045495 0 0 0 0 0 0 +vethad845c5: 11372445599 12688613 0 0 0 0 0 0 3863963160 9807427 0 0 0 0 0 0 +veth1bb2863: 2309909319 2277661 0 0 0 0 0 0 1590377742 1850698 0 0 0 0 0 0 +vethf0ccf73: 7610217646 2681033 0 0 0 0 0 0 1257078810 2019144 0 0 0 0 0 0 diff --git a/metricbeat/module/system/network/tests/testdata2/proc/net/dev b/metricbeat/module/system/network/tests/testdata2/proc/net/dev new file mode 100644 index 00000000000..069d2f08756 --- /dev/null +++ b/metricbeat/module/system/network/tests/testdata2/proc/net/dev @@ -0,0 +1,19 @@ +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed + lo: 1755060487 7078052 0 0 0 0 0 0 1755090577 7178052 0 0 0 0 0 0 + eno1: 19784529479 34542897 0 10 0 0 0 8323480 23923097145 22626696 0 0 0 0 0 0 + eno2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eno3: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eno4: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +br-b47241fe51cb: 5120 82 0 0 0 0 0 13 2740 26 0 0 0 0 0 0 +docker0: 12103120 25910 0 0 0 0 0 0 22016528 36782 0 0 0 0 0 0 +veth9b054b1: 0 0 0 0 0 0 0 0 39236 509 0 0 0 0 0 0 +vboxnet0: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +vboxnet1: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +vboxnet2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +br-35e03ac12f61: 89956121 334556 0 0 0 0 0 0 52243734 140194 0 0 0 0 0 0 +vethb4fe756: 936385 1467 0 0 0 0 0 0 434283 1377 0 0 0 0 0 0 +vethdb936ec: 6617151565 13069604 0 0 0 0 0 0 21159216788 17045495 0 0 0 0 0 0 +vethad845c5: 11372445599 12688613 0 0 0 0 0 0 3863963160 9807427 0 0 0 0 0 0 +veth1bb2863: 2309909319 2277661 0 0 0 0 0 0 1590377742 1850698 0 0 0 0 0 0 +vethf0ccf73: 7610217746 2681033 0 0 0 0 0 0 1257078810 2119145 0 0 0 0 0 0 \ No newline at end of file diff --git a/metricbeat/module/system/network/tests/testdata3/proc/net/dev b/metricbeat/module/system/network/tests/testdata3/proc/net/dev new file mode 100644 index 00000000000..2fd838117be --- /dev/null +++ b/metricbeat/module/system/network/tests/testdata3/proc/net/dev @@ -0,0 +1,31 @@ +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed + lo: 7963008922 5463536 0 0 0 0 0 0 7963008922 5463536 0 0 0 0 0 0 + eth0: 398444176031 113433103 0 0 0 0 0 0 101889838567 106565184 0 0 0 0 0 0 +docker0: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +veth6670df22: 514618373 1547898 0 0 0 0 0 0 528358917 1539922 0 0 0 0 0 0 +veth46fce54b: 1372847132 13810810 0 0 0 0 0 0 68795908814 15049988 0 0 0 0 0 0 +veth80b394bf: 381102744 4399867 0 0 0 0 0 0 3323567373 4212968 0 0 0 0 0 0 +veth3fac0d3a: 1069716043 3369370 0 0 0 0 0 0 11282617637 3689656 0 0 0 0 0 0 +veth3b200419: 308 4 0 0 0 0 0 0 446 5 0 0 0 0 0 0 +veth44b2ac6c: 36323 457 0 0 0 0 0 0 2815590 505 0 0 0 0 0 0 +vethc4bb14c8: 35702 429 0 0 0 0 0 0 2827840 464 0 0 0 0 0 0 +veth93f91757: 18808 200 0 0 0 0 0 0 2646887 251 0 0 0 0 0 0 +vetha7cff99a: 20128 195 0 0 0 0 0 0 2641971 249 0 0 0 0 0 0 +veth2c664383: 21595 231 0 0 0 0 0 0 
2703706 262 0 0 0 0 0 0 +veth1305edb0: 16621 195 0 0 0 0 0 0 2627343 226 0 0 0 0 0 0 +veth23fc0f56: 628 8 0 0 0 0 0 0 446 5 0 0 0 0 0 0 +veth65891994: 308 4 0 0 0 0 0 0 446 5 0 0 0 0 0 0 +vethdc791306: 389968462 723281 0 0 0 0 0 0 350535082 731321 0 0 0 0 0 0 +vethdbb1e8cc: 966415949 3412980 0 0 0 0 0 0 3786100854 3417295 0 0 0 0 0 0 +veth08cafe8e: 1488277219 959331 0 0 0 0 0 0 151128472 610027 0 0 0 0 0 0 +vethc48fd2d7: 7901294175 2354094 0 0 0 0 0 0 2915337605 2345844 0 0 0 0 0 0 +veth2bf4c62f: 547312101 1007781 0 0 0 0 0 0 150262077 1166200 0 0 0 0 0 0 +veth93d9e375: 5800146319 4086288 0 0 0 0 0 0 2372657603 3670868 0 0 0 0 0 0 +veth0228c277: 4522851970 1217721 0 0 0 0 0 0 92417022 876822 0 0 0 0 0 0 +veth2f280795: 3026042574 2295286 0 0 0 0 0 0 14061850234 2758346 0 0 0 0 0 0 +vetha9cf24aa: 6695686090 2813131 0 0 0 0 0 0 2402989575 2645630 0 0 0 0 0 0 +veth613a14c9: 3158414047 2131687 0 0 0 0 0 0 1973949394 1925351 0 0 0 0 0 0 +vethe584bff8: 3499555388 2158277 0 0 0 0 0 0 854125987 2034534 0 0 0 0 0 0 +veth9d517f8c: 7897675838 9099992 0 0 0 0 0 0 3335685281 7343157 0 0 0 0 0 0 +veth8e6b8c88: 7869425980 2383121 0 0 0 0 0 0 2933011516 2376166 0 0 0 0 0 0 \ No newline at end of file diff --git a/metricbeat/module/system/network_summary/network_summary.go b/metricbeat/module/system/network_summary/network_summary.go index 92a48d23b61..757ed56b267 100644 --- a/metricbeat/module/system/network_summary/network_summary.go +++ b/metricbeat/module/system/network_summary/network_summary.go @@ -18,7 +18,8 @@ package network_summary import ( - "github.com/pkg/errors" + "errors" + "fmt" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/mb" @@ -64,7 +65,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(report mb.ReporterV2) error { counterInfo, err := fetchNetStats() if err != nil { - return errors.Wrap(err, "Error fetching stats") + return fmt.Errorf("Error fetching stats: %w", err) } if counterInfo == nil { return errors.New("NetworkCounters not available on this platform") @@ -81,13 +82,13 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { func fetchNetStats() (*sysinfotypes.NetworkCountersInfo, error) { h, err := sysinfo.Host() if err != nil { - return nil, errors.Wrap(err, "failed to read self process information") + return nil, fmt.Errorf("failed to read self process information: %w", err) } if vmstatHandle, ok := h.(sysinfotypes.NetworkCounters); ok { info, err := vmstatHandle.NetworkCounters() if err != nil { - return nil, errors.Wrap(err, "error getting network counters") + return nil, fmt.Errorf("error getting network counters: %w", err) } return info, nil } diff --git a/metricbeat/module/system/process/process.go b/metricbeat/module/system/process/process.go index 6256f90f742..2274f1846d6 100644 --- a/metricbeat/module/system/process/process.go +++ b/metricbeat/module/system/process/process.go @@ -20,11 +20,10 @@ package process import ( + "fmt" "os" "runtime" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" "github.com/elastic/elastic-agent-libs/logp" @@ -104,7 +103,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { procs, roots, err := m.stats.Get() if err != nil { - return errors.Wrap(err, "process stats") + return fmt.Errorf("process stats: %w", err) } for evtI := range procs { diff --git a/metricbeat/module/system/raid/blockinfo/getdev.go 
b/metricbeat/module/system/raid/blockinfo/getdev.go index bb2bae080ee..02527c80636 100644 --- a/metricbeat/module/system/raid/blockinfo/getdev.go +++ b/metricbeat/module/system/raid/blockinfo/getdev.go @@ -22,15 +22,13 @@ import ( "io/ioutil" "os" "path/filepath" - - "github.com/pkg/errors" ) // ListAll lists all the multi-disk devices in a RAID array func ListAll(path string) ([]MDDevice, error) { dir, err := ioutil.ReadDir(path) if err != nil { - return nil, errors.Wrap(err, "could not read directory") + return nil, fmt.Errorf("could not read directory: %w", err) } var mds []MDDevice for _, item := range dir { @@ -40,7 +38,7 @@ func ListAll(path string) ([]MDDevice, error) { } dev, err := getMDDevice(testpath) if err != nil { - return nil, errors.Wrap(err, "could not get device info") + return nil, fmt.Errorf("could not get device info: %w", err) } mds = append(mds, dev) } @@ -56,7 +54,7 @@ func ListAll(path string) ([]MDDevice, error) { func getMDDevice(path string) (MDDevice, error) { _, err := os.Stat(path) if err != nil { - return MDDevice{}, errors.Wrap(err, "path does not exist") + return MDDevice{}, fmt.Errorf("path does not exist: %w", err) } //This is the best heuristic I've found so far for identifying an md device. diff --git a/metricbeat/module/system/raid/blockinfo/parser.go b/metricbeat/module/system/raid/blockinfo/parser.go index 7c517b6f534..3763ed28ab6 100644 --- a/metricbeat/module/system/raid/blockinfo/parser.go +++ b/metricbeat/module/system/raid/blockinfo/parser.go @@ -24,8 +24,6 @@ import ( "strconv" "strings" - "github.com/pkg/errors" - "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" ) @@ -65,7 +63,7 @@ func parseIntVal(path string) (int64, error) { func getSyncStatus(path string, size int64) (SyncStatus, error) { raw, err := ioutil.ReadFile(filepath.Join(path, "md", "sync_completed")) if err != nil { - return SyncStatus{}, errors.Wrap(err, "could not open sync_completed") + return SyncStatus{}, fmt.Errorf("could not open sync_completed: %w", err) } completedState := strings.TrimSpace(string(raw)) if completedState == "none" { @@ -80,12 +78,12 @@ func getSyncStatus(path string, size int64) (SyncStatus, error) { current, err := strconv.ParseInt(matches[0], 10, 64) if err != nil { - return SyncStatus{}, errors.Wrap(err, "could not parse data sync_completed") + return SyncStatus{}, fmt.Errorf("could not parse data sync_completed: %w", err) } total, err := strconv.ParseInt(matches[1], 10, 64) if err != nil { - return SyncStatus{}, errors.Wrap(err, "could not parse data sync_completed") + return SyncStatus{}, fmt.Errorf("could not parse data sync_completed: %w", err) } return SyncStatus{Complete: current, Total: total}, nil @@ -99,27 +97,27 @@ func newMD(path string) (MDDevice, error) { dev.Name = filepath.Base(path) size, err := parseIntVal(filepath.Join(path, "size")) if err != nil { - return dev, errors.Wrap(err, "could not get device size") + return dev, fmt.Errorf("could not get device size: %w", err) } dev.Size = size //RAID array state state, err := ioutil.ReadFile(filepath.Join(path, "md", "array_state")) if err != nil { - return dev, errors.Wrap(err, "could not open array_state") + return dev, fmt.Errorf("could not open array_state: %w", err) } dev.ArrayState = strings.TrimSpace(string(state)) //get total disks disks, err := getDisks(path) if err != nil { - return dev, errors.Wrap(err, "could not get disk data") + return dev, fmt.Errorf("could not get disk data: %w", err) } dev.DiskStates = disks level, err := 
ioutil.ReadFile(filepath.Join(path, "md", "level")) if err != nil { - return dev, errors.Wrap(err, "could not get raid level") + return dev, fmt.Errorf("could not get raid level: %w", err) } dev.Level = strings.TrimSpace(string(level)) @@ -130,14 +128,14 @@ func newMD(path string) (MDDevice, error) { //Will be idle if nothing is going on syncAction, err := ioutil.ReadFile(filepath.Join(path, "md", "sync_action")) if err != nil { - return dev, errors.Wrap(err, "could not open sync_action") + return dev, fmt.Errorf("could not open sync_action: %w", err) } dev.SyncAction = strings.TrimSpace(string(syncAction)) //sync status syncStats, err := getSyncStatus(path, dev.Size) if err != nil { - return dev, errors.Wrap(err, "error getting sync data") + return dev, fmt.Errorf("error getting sync data: %w", err) } dev.SyncStatus = syncStats @@ -151,7 +149,7 @@ func getDisks(path string) (DiskStates, error) { //so far, haven't found a less hacky way to do this. devices, err := filepath.Glob(filepath.Join(path, "md", "dev-*")) if err != nil { - return DiskStates{}, errors.Wrap(err, "could not get device list") + return DiskStates{}, fmt.Errorf("could not get device list: %w", err) } var disks DiskStates @@ -191,7 +189,7 @@ func getDisks(path string) (DiskStates, error) { func getDisk(path string) (string, error) { state, err := ioutil.ReadFile(filepath.Join(path, "state")) if err != nil { - return "", errors.Wrap(err, "error getting disk state") + return "", fmt.Errorf("error getting disk state: %w", err) } return strings.TrimSpace(string(state)), nil diff --git a/metricbeat/module/system/raid/raid.go b/metricbeat/module/system/raid/raid.go index 238b067fd6f..191027657d7 100644 --- a/metricbeat/module/system/raid/raid.go +++ b/metricbeat/module/system/raid/raid.go @@ -18,7 +18,7 @@ package raid import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -62,7 +62,7 @@ func blockto1024(b int64) int64 { func (m *MetricSet) Fetch(r mb.ReporterV2) error { devices, err := blockinfo.ListAll(m.mod.ResolveHostFS("/sys/block")) if err != nil { - return errors.Wrap(err, "failed to parse sysfs") + return fmt.Errorf("failed to parse sysfs: %w", err) } for _, blockDev := range devices { diff --git a/metricbeat/module/system/service/data.go b/metricbeat/module/system/service/data.go index 1597beb6052..cb2329a7307 100644 --- a/metricbeat/module/system/service/data.go +++ b/metricbeat/module/system/service/data.go @@ -20,10 +20,10 @@ package service import ( + "fmt" "time" "github.com/coreos/go-systemd/v22/dbus" - "github.com/pkg/errors" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/elastic-agent-libs/mapstr" @@ -63,7 +63,7 @@ type Properties struct { func formProperties(unit dbus.UnitStatus, props Properties) (mb.Event, error) { timeSince, err := timeSince(props, unit.ActiveState) if err != nil { - return mb.Event{}, errors.Wrap(err, "error getting timestamp") + return mb.Event{}, fmt.Errorf("error getting timestamp: %w", err) } event := mb.Event{ diff --git a/metricbeat/module/system/service/service.go b/metricbeat/module/system/service/service.go index 6a19c8b828c..f46e8c0287a 100644 --- a/metricbeat/module/system/service/service.go +++ b/metricbeat/module/system/service/service.go @@ -20,11 +20,11 @@ package service import ( + "fmt" "path/filepath" "github.com/coreos/go-systemd/v22/dbus" "github.com/mitchellh/mapstructure" - "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" 
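The Go hunks throughout this section are one mechanical migration: `errors.Wrap(err, "msg")` from `github.com/pkg/errors` becomes the standard library's `fmt.Errorf("msg: %w", err)`, with `errors.New` and `errors.Errorf` moving to stdlib `errors` and `fmt`. The `%w` verb keeps the cause on the error chain, so callers can still match it with `errors.Is` and `errors.As` after the dependency is dropped. A minimal sketch of that behavior, with illustrative names:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// loadConfig wraps the underlying failure with %w, the stdlib
// equivalent of errors.Wrap(err, "reading config").
func loadConfig(path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("reading config: %w", err)
	}
	return data, nil
}

func main() {
	_, err := loadConfig("does-not-exist.yml")

	// errors.Is walks the %w chain, so the wrapped sentinel still matches.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true

	// errors.As recovers the concrete *fs.PathError to inspect its fields.
	var pathErr *fs.PathError
	if errors.As(err, &pathErr) {
		fmt.Println("failing path:", pathErr.Path)
	}
}
```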
"github.com/elastic/beats/v7/metricbeat/mb" @@ -67,12 +67,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { conn, err := dbus.New() if err != nil { - return nil, errors.Wrap(err, "error connecting to dbus") + return nil, fmt.Errorf("error connecting to dbus: %w", err) } unitFunction, err := instrospectForUnitMethods() if err != nil { - return nil, errors.Wrap(err, "error finding ListUnits Method") + return nil, fmt.Errorf("error finding ListUnits Method: %w", err) } return &MetricSet{ @@ -90,7 +90,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { units, err := m.unitList(m.conn, m.cfg.StateFilter, m.cfg.PatternFilter) if err != nil { - return errors.Wrap(err, "error getting list of running units") + return fmt.Errorf("error getting list of running units: %w", err) } for _, unit := range units { @@ -134,11 +134,11 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { func getProps(conn *dbus.Conn, unit string) (Properties, error) { rawProps, err := conn.GetAllProperties(unit) if err != nil { - return Properties{}, errors.Wrap(err, "error getting list of running units") + return Properties{}, fmt.Errorf("error getting list of running units: %w", err) } parsed := Properties{} if err := mapstructure.Decode(rawProps, &parsed); err != nil { - return parsed, errors.Wrap(err, "error decoding properties") + return parsed, fmt.Errorf("error decoding properties: %w", err) } return parsed, nil } diff --git a/metricbeat/module/system/socket/socket.go b/metricbeat/module/system/socket/socket.go index 1d6b1dc65f1..a0828d69b4f 100644 --- a/metricbeat/module/system/socket/socket.go +++ b/metricbeat/module/system/socket/socket.go @@ -27,8 +27,6 @@ import ( "strconv" "syscall" - "github.com/pkg/errors" - sock "github.com/elastic/beats/v7/metricbeat/helper/socket" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -119,7 +117,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { sockets, err := m.netlink.GetSocketList() if err != nil { - return errors.Wrap(err, "failed requesting socket dump") + return fmt.Errorf("failed requesting socket dump: %w", err) } debugf("netlink returned %d sockets", len(sockets)) diff --git a/metricbeat/module/system/socket/socket_test.go b/metricbeat/module/system/socket/socket_test.go index 63b8712550b..3d27844f022 100644 --- a/metricbeat/module/system/socket/socket_test.go +++ b/metricbeat/module/system/socket/socket_test.go @@ -27,7 +27,6 @@ import ( "strings" "testing" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -153,7 +152,7 @@ func TestFetch(t *testing.T) { func getRequiredValue(t testing.TB, key string, m mapstr.M) interface{} { v, err := m.GetValue(key) if err != nil { - t.Fatal(errors.Wrapf(err, "failed to get value for key '%s'", key)) + t.Fatal(fmt.Errorf("failed to get value for key '%s': %w", key, err)) } if v == nil { t.Fatalf("key %v not found in %v", key, m) diff --git a/metricbeat/module/system/socket_summary/socket_summary.go b/metricbeat/module/system/socket_summary/socket_summary.go index 57d4312fad9..5301887413a 100644 --- a/metricbeat/module/system/socket_summary/socket_summary.go +++ b/metricbeat/module/system/socket_summary/socket_summary.go @@ -18,9 +18,9 @@ package socket_summary import ( + "fmt" "syscall" - "github.com/pkg/errors" "github.com/shirou/gopsutil/v3/net" "github.com/elastic/beats/v7/metricbeat/mb" @@ -155,7 +155,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { conns, err := connections("inet") if 
err != nil { - return errors.Wrap(err, "error getting connections") + return fmt.Errorf("error getting connections: %w", err) } stats := calculateConnStats(conns) diff --git a/metricbeat/module/system/socket_summary/sockstat_linux.go b/metricbeat/module/system/socket_summary/sockstat_linux.go index b198f322ff3..965cea3bd14 100644 --- a/metricbeat/module/system/socket_summary/sockstat_linux.go +++ b/metricbeat/module/system/socket_summary/sockstat_linux.go @@ -24,7 +24,6 @@ import ( "fmt" "os" - "github.com/pkg/errors" "github.com/shirou/gopsutil/v3/net" "github.com/elastic/elastic-agent-libs/mapstr" @@ -66,7 +65,7 @@ func applyEnhancements(data mapstr.M, sys resolve.Resolver) (mapstr.M, error) { stat, err := parseSockstat(dir) if err != nil { - return nil, errors.Wrap(err, "error getting sockstat data") + return nil, fmt.Errorf("error getting sockstat data: %w", err) } data.Put("tcp.all.orphan", stat.TCPOrphan) data.Put("tcp.memory", pageSize*stat.TCPMem) @@ -113,7 +112,7 @@ func parseSockstat(path string) (SockStat, error) { txt := scanner.Text() count, err := fmt.Sscanf(txt, scanfLines[iter], scanfOut[iter]...) if err != nil { - return ss, errors.Wrap(err, "error reading sockstat") + return ss, fmt.Errorf("error reading sockstat: %w", err) } if count != len(scanfOut[iter]) { return ss, fmt.Errorf("did not match fields in line %s", scanfLines[iter]) @@ -123,7 +122,7 @@ func parseSockstat(path string) (SockStat, error) { } if err = scanner.Err(); err != nil { - return ss, errors.Wrap(err, "error in scan") + return ss, fmt.Errorf("error in scan: %w", err) } return ss, nil diff --git a/metricbeat/module/system/uptime/uptime.go b/metricbeat/module/system/uptime/uptime.go index fa5640b92c0..20a03bdc13c 100644 --- a/metricbeat/module/system/uptime/uptime.go +++ b/metricbeat/module/system/uptime/uptime.go @@ -20,7 +20,7 @@ package uptime import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -49,7 +49,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { var uptime sigar.Uptime if err := uptime.Get(); err != nil { - return errors.Wrap(err, "failed to get uptime") + return fmt.Errorf("failed to get uptime: %w", err) } r.Event(mb.Event{ diff --git a/metricbeat/module/traefik/health/health.go b/metricbeat/module/traefik/health/health.go index ec528d83a98..26861d7b32a 100644 --- a/metricbeat/module/traefik/health/health.go +++ b/metricbeat/module/traefik/health/health.go @@ -18,7 +18,7 @@ package health import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -66,7 +66,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { data, err := m.http.FetchJSON() if err != nil { - return errors.Wrap(err, "failed to sample health") + return fmt.Errorf("failed to sample health: %w", err) } metricSetFields, _ := eventMapping(data) diff --git a/metricbeat/module/uwsgi/status/data.go b/metricbeat/module/uwsgi/status/data.go index 6c690db50f9..23dd353290e 100644 --- a/metricbeat/module/uwsgi/status/data.go +++ b/metricbeat/module/uwsgi/status/data.go @@ -19,8 +19,7 @@ package status import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/elastic-agent-libs/mapstr" @@ -81,7 +80,7 @@ func eventsMapping(content []byte, reporter mb.ReporterV2) error { var stats 
uwsgiStat err := json.Unmarshal(content, &stats) if err != nil { - return errors.Wrap(err, "uwsgi statistics parsing failed") + return fmt.Errorf("uwsgi statistics parsing failed: %w", err) } totalRequests := 0 diff --git a/metricbeat/module/uwsgi/status/status.go b/metricbeat/module/uwsgi/status/status.go index e343ada60c6..8ade1df3a8d 100644 --- a/metricbeat/module/uwsgi/status/status.go +++ b/metricbeat/module/uwsgi/status/status.go @@ -18,6 +18,7 @@ package status import ( + "errors" "fmt" "io" "io/ioutil" @@ -26,8 +27,6 @@ import ( "net/url" "strings" - - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/uwsgi" "github.com/elastic/elastic-agent-libs/mapstr" @@ -55,8 +54,7 @@ func fetchStatData(URL string) ([]byte, error) { u, err := url.Parse(URL) if err != nil { - - return nil, errors.Wrap(err, "parsing uwsgi stats url failed") + return nil, fmt.Errorf("parsing uwsgi stats url failed: %w", err) } switch u.Scheme { @@ -93,7 +91,7 @@ func fetchStatData(URL string) ([]byte, error) { data, err := ioutil.ReadAll(reader) if err != nil { - return nil, errors.Wrap(err, "uwsgi data read failed") + return nil, fmt.Errorf("uwsgi data read failed: %w", err) } return data, nil diff --git a/metricbeat/module/vsphere/_meta/Dockerfile b/metricbeat/module/vsphere/_meta/Dockerfile index d003155c7f7..f54e001b936 100644 --- a/metricbeat/module/vsphere/_meta/Dockerfile +++ b/metricbeat/module/vsphere/_meta/Dockerfile @@ -1,7 +1,7 @@ ARG VSPHERE_GOLANG_VERSION -FROM golang:${VSPHERE_GOLANG_VERSION}-alpine +FROM golang:1.20.7 -RUN apk add --no-cache curl git +RUN apt-get update && apt-get install -y curl git RUN go install github.com/vmware/govmomi/vcsim@v0.30.4 HEALTHCHECK --interval=1s --retries=60 --timeout=10s CMD curl http://localhost:8989/ diff --git a/metricbeat/module/vsphere/datastore/datastore.go b/metricbeat/module/vsphere/datastore/datastore.go index 0afba2c7ca8..4ec84b4b74e 100644 --- a/metricbeat/module/vsphere/datastore/datastore.go +++ b/metricbeat/module/vsphere/datastore/datastore.go @@ -19,8 +19,7 @@ package datastore import ( "context" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/vsphere" @@ -61,12 +60,12 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error { client, err := govmomi.NewClient(ctx, m.HostURL, m.Insecure) if err != nil { - return errors.Wrap(err, "error in NewClient") + return fmt.Errorf("error in NewClient: %w", err) } defer func() { if err := client.Logout(ctx); err != nil { - m.Logger().Debug(errors.Wrap(err, "error trying to logout from vshphere")) + m.Logger().Debug(fmt.Errorf("error trying to logout from vshphere: %w", err)) } }() @@ -77,19 +76,19 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error { v, err := mgr.CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{"Datastore"}, true) if err != nil { - return errors.Wrap(err, "error in CreateContainerView") + return fmt.Errorf("error in CreateContainerView: %w", err) } defer func() { if err := v.Destroy(ctx); err != nil { - m.Logger().Debug(errors.Wrap(err, "error trying to destroy view from vshphere")) + m.Logger().Debug(fmt.Errorf("error trying to destroy view from vshphere: %w", err)) } }() // Retrieve summary property for all datastores var dst []mo.Datastore if err = v.Retrieve(ctx, []string{"Datastore"}, []string{"summary"}, &dst); err != nil { - return errors.Wrap(err, "error in Retrieve") + return fmt.Errorf("error in
Retrieve: %w", err) } for _, ds := range dst { diff --git a/metricbeat/module/vsphere/host/host.go b/metricbeat/module/vsphere/host/host.go index b299d6b83b0..457d4a65452 100644 --- a/metricbeat/module/vsphere/host/host.go +++ b/metricbeat/module/vsphere/host/host.go @@ -19,11 +19,10 @@ package host import ( "context" + "errors" "fmt" "strings" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/vsphere" "github.com/elastic/elastic-agent-libs/mapstr" @@ -66,12 +65,12 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error { client, err := govmomi.NewClient(ctx, m.HostURL, m.Insecure) if err != nil { - return errors.Wrap(err, "error in NewClient") + return fmt.Errorf("error in NewClient: %w", err) } defer func() { if err := client.Logout(ctx); err != nil { - m.Logger().Debug(errors.Wrap(err, "error trying to logout from vshphere")) + m.Logger().Debug(fmt.Errorf("error trying to logout from vshphere: %w", err)) } }() @@ -82,12 +81,12 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error { v, err := mgr.CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{"HostSystem"}, true) if err != nil { - return errors.Wrap(err, "error in CreateContainerView") + return fmt.Errorf("error in CreateContainerView: %w", err) } defer func() { if err := v.Destroy(ctx); err != nil { - m.Logger().Debug(errors.Wrap(err, "error trying to destroy view from vshphere")) + m.Logger().Debug(fmt.Errorf("error trying to destroy view from vshphere: %w", err)) } }() @@ -95,7 +94,7 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error { var hst []mo.HostSystem err = v.Retrieve(ctx, []string{"HostSystem"}, []string{"summary"}, &hst) if err != nil { - return errors.Wrap(err, "error in Retrieve") + return fmt.Errorf("error in Retrieve: %w", err) } for _, hs := range hst { diff --git a/metricbeat/module/vsphere/virtualmachine/virtualmachine.go b/metricbeat/module/vsphere/virtualmachine/virtualmachine.go index 546a40fdbfb..fd1a20ee3a7 100644 --- a/metricbeat/module/vsphere/virtualmachine/virtualmachine.go +++ b/metricbeat/module/vsphere/virtualmachine/virtualmachine.go @@ -19,6 +19,7 @@ package virtualmachine import ( "context" + "errors" "fmt" "strings" @@ -26,7 +27,6 @@ import ( "github.com/elastic/beats/v7/metricbeat/module/vsphere" "github.com/elastic/elastic-agent-libs/mapstr" - "github.com/pkg/errors" "github.com/vmware/govmomi" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/property" @@ -80,12 +80,12 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error { client, err := govmomi.NewClient(ctx, m.HostURL, m.Insecure) if err != nil { - return errors.Wrap(err, "error in NewClient") + return fmt.Errorf("error in NewClient: %w", err) } defer func() { if err := client.Logout(ctx); err != nil { - m.Logger().Debug(errors.Wrap(err, "error trying to logout from vshphere")) + m.Logger().Debug(fmt.Errorf("error trying to logout from vshphere: %w", err)) } }() @@ -97,7 +97,7 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error { var err error customFieldsMap, err = setCustomFieldsMap(ctx, c) if err != nil { - return errors.Wrap(err, "error in setCustomFieldsMap") + return fmt.Errorf("error in setCustomFieldsMap: %w", err) } } @@ -106,12 +106,12 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error { v, err := mgr.CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{"VirtualMachine"}, 
true) if err != nil { - return errors.Wrap(err, "error in CreateContainerView") + return fmt.Errorf("error in CreateContainerView: %w", err) } defer func() { if err := v.Destroy(ctx); err != nil { - m.Logger().Debug(errors.Wrap(err, "error trying to destroy view from vshphere")) + m.Logger().Debug(fmt.Errorf("error trying to destroy view from vshphere: %w", err)) } }() @@ -119,7 +119,7 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error { var vmt []mo.VirtualMachine err = v.Retrieve(ctx, []string{"VirtualMachine"}, []string{"summary"}, &vmt) if err != nil { - return errors.Wrap(err, "error in Retrieve") + return fmt.Errorf("error in Retrieve: %w", err) } for _, vm := range vmt { @@ -278,11 +278,11 @@ func setCustomFieldsMap(ctx context.Context, client *vim25.Client) (map[int32]st customFieldsManager, err := object.GetCustomFieldsManager(client) if err != nil { - return nil, errors.Wrap(err, "failed to get custom fields manager") + return nil, fmt.Errorf("failed to get custom fields manager: %w", err) } field, err := customFieldsManager.Field(ctx) if err != nil { - return nil, errors.Wrap(err, "failed to get custom fields") + return nil, fmt.Errorf("failed to get custom fields: %w", err) } for _, def := range field { diff --git a/metricbeat/module/windows/perfmon/config.go b/metricbeat/module/windows/perfmon/config.go index 3544b88683f..f16c9c3b324 100644 --- a/metricbeat/module/windows/perfmon/config.go +++ b/metricbeat/module/windows/perfmon/config.go @@ -20,9 +20,9 @@ package perfmon import ( + "errors" + "fmt" "time" - - "github.com/pkg/errors" ) var allowedFormats = []string{"float", "large", "long"} @@ -63,7 +63,7 @@ func (counter *QueryCounter) InitDefaults() { func (counter *QueryCounter) Validate() error { if !isValidFormat(counter.Format) { - return errors.Errorf("initialization failed: format '%s' "+ + return fmt.Errorf("initialization failed: format '%s' "+ "for counter '%s' is invalid (must be float, large or long)", counter.Format, counter.Name) } diff --git a/metricbeat/module/windows/perfmon/data.go b/metricbeat/module/windows/perfmon/data.go index 2565ab11604..9add5c03896 100644 --- a/metricbeat/module/windows/perfmon/data.go +++ b/metricbeat/module/windows/perfmon/data.go @@ -25,8 +25,6 @@ import ( "strconv" "strings" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/helper/windows/pdh" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/elastic-agent-libs/logp" @@ -71,7 +69,7 @@ func (re *Reader) groupToEvents(counters map[string][]pdh.CounterValue) []mb.Eve if _, ok := eventMap[eventKey]; !ok { eventMap[eventKey] = &mb.Event{ MetricSetFields: mapstr.M{}, - Error: errors.Wrapf(val.Err.Error, "failed on query=%v", counterPath), + Error: fmt.Errorf("failed on query=%v: %w", counterPath, val.Err.Error), } if val.Instance != "" { // will ignore instance index diff --git a/metricbeat/module/windows/perfmon/perfmon.go b/metricbeat/module/windows/perfmon/perfmon.go index 084d9a7cc91..19f231383f1 100644 --- a/metricbeat/module/windows/perfmon/perfmon.go +++ b/metricbeat/module/windows/perfmon/perfmon.go @@ -20,7 +20,7 @@ package perfmon import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -48,7 +48,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { } reader, err := NewReader(config) if err != nil { - return nil, errors.Wrap(err, "initialization of reader failed") + return nil, fmt.Errorf("initialization of reader failed: %w", err) } return &MetricSet{ BaseMetricSet: 
base, @@ -69,12 +69,12 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { if m.reader.config.RefreshWildcardCounters { err := m.reader.RefreshCounterPaths() if err != nil { - return errors.Wrap(err, "failed retrieving counters") + return fmt.Errorf("failed retrieving counters: %w", err) } } events, err := m.reader.Read() if err != nil { - return errors.Wrap(err, "failed reading counters") + return fmt.Errorf("failed reading counters: %w", err) } for _, event := range events { @@ -91,7 +91,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { func (m *MetricSet) Close() error { err := m.reader.Close() if err != nil { - return errors.Wrap(err, "failed to close pdh query") + return fmt.Errorf("failed to close pdh query: %w", err) } return nil } diff --git a/metricbeat/module/windows/service/reader.go b/metricbeat/module/windows/service/reader.go index 11e5c901f17..379d4cb738f 100644 --- a/metricbeat/module/windows/service/reader.go +++ b/metricbeat/module/windows/service/reader.go @@ -22,10 +22,10 @@ package service import ( "crypto/sha256" "encoding/base64" + "fmt" "strconv" "syscall" - "github.com/pkg/errors" "golang.org/x/sys/windows/registry" "github.com/elastic/elastic-agent-libs/mapstr" @@ -53,7 +53,7 @@ type Reader struct { func NewReader() (*Reader, error) { handle, err := openSCManager("", "", ScManagerEnumerateService|ScManagerConnect) if err != nil { - return nil, errors.Wrap(err, "initialization failed") + return nil, fmt.Errorf("initialization failed: %w", err) } guid, err := getMachineGUID() @@ -151,12 +151,12 @@ func getMachineGUID() (string, error) { k, err := registry.OpenKey(key, path, registry.READ|registry.WOW64_64KEY) if err != nil { - return "", errors.Wrapf(err, `failed to open HKLM\%v`, path) + return "", fmt.Errorf(`failed to open HKLM\%v: %w`, path, err) } guid, _, err := k.GetStringValue(name) if err != nil { - return "", errors.Wrapf(err, `failed to get value of HKLM\%v\%v`, path, name) + return "", fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) } return guid, nil diff --git a/metricbeat/module/windows/service/service_status.go b/metricbeat/module/windows/service/service_status.go index f2a9ed853b1..571d324cb59 100644 --- a/metricbeat/module/windows/service/service_status.go +++ b/metricbeat/module/windows/service/service_status.go @@ -21,6 +21,7 @@ package service import ( "bytes" + "fmt" "os" "strconv" "syscall" @@ -30,7 +31,8 @@ import ( "github.com/elastic/beats/v7/libbeat/common" - "github.com/pkg/errors" + "errors" + "golang.org/x/sys/windows" "github.com/elastic/elastic-agent-libs/logp" @@ -146,7 +148,7 @@ func GetServiceStates(handle Handle, state ServiceEnumState, protectedServices m servicesBuffer = make([]byte, len(servicesBuffer)+int(bytesNeeded)) continue } - return nil, errors.Wrap(ServiceErrno(err.(syscall.Errno)), "error while calling the _EnumServicesStatusEx api") + return nil, fmt.Errorf("error while calling the _EnumServicesStatusEx api: %w", ServiceErrno(err.(syscall.Errno))) } break @@ -213,7 +215,7 @@ func getServiceInformation(rawService *EnumServiceStatusProcess, servicesBuffer serviceHandle, err := openServiceHandle(handle, service.ServiceName, ServiceQueryConfig) if err != nil { - return service, errors.Wrapf(err, "error while opening service %s", service.ServiceName) + return service, fmt.Errorf("error while opening service %s: %w", service.ServiceName, err) } defer closeHandle(serviceHandle) @@ -279,7 +281,7 @@ func getAdditionalServiceInfo(serviceHandle Handle, service *Status) error { buffer = 
make([]byte, len(buffer)+int(bytesNeeded)) continue } - return errors.Wrapf(ServiceErrno(err.(syscall.Errno)), "error while querying the service configuration %s", service.ServiceName) + return fmt.Errorf("error while querying the service configuration %s: %w", service.ServiceName, ServiceErrno(err.(syscall.Errno))) } serviceQueryConfig := (*QueryServiceConfig)(unsafe.Pointer(&buffer[0])) service.StartType = ServiceStartType(serviceQueryConfig.DwStartType) @@ -311,7 +313,7 @@ func getOptionalServiceInfo(serviceHandle Handle, service *Status) error { if service.StartType == StartTypeAutomatic { delayedInfoBuffer, err := queryServiceConfig2(serviceHandle, ConfigDelayedAutoStartInfo) if err != nil { - return errors.Wrapf(err, "error while querying rhe service configuration %s", service.ServiceName) + return fmt.Errorf("error while querying rhe service configuration %s: %w", service.ServiceName, err) } delayedInfo = (*serviceDelayedAutoStartInfo)(unsafe.Pointer(&delayedInfoBuffer[0])) diff --git a/metricbeat/module/zookeeper/mntr/mntr.go b/metricbeat/module/zookeeper/mntr/mntr.go index cd4e1000352..f08c7d36287 100644 --- a/metricbeat/module/zookeeper/mntr/mntr.go +++ b/metricbeat/module/zookeeper/mntr/mntr.go @@ -44,11 +44,11 @@ ZooKeeper mntr Command Output package mntr import ( + "fmt" + "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" "github.com/elastic/beats/v7/metricbeat/module/zookeeper" - - "github.com/pkg/errors" ) func init() { @@ -75,12 +75,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { outputReader, err := zookeeper.RunCommand("mntr", m.Host(), m.Module().Config().Timeout) if err != nil { - return errors.Wrap(err, "mntr command failed") + return fmt.Errorf("mntr command failed: %w", err) } serverID, err := zookeeper.ServerID(m.Host(), m.Module().Config().Timeout) if err != nil { - return errors.Wrap(err, "error obtaining server id") + return fmt.Errorf("error obtaining server id: %w", err) } eventMapping(serverID, outputReader, r, m.Logger()) diff --git a/metricbeat/module/zookeeper/server/data.go b/metricbeat/module/zookeeper/server/data.go index 3619d018119..9d389abf5bf 100644 --- a/metricbeat/module/zookeeper/server/data.go +++ b/metricbeat/module/zookeeper/server/data.go @@ -20,13 +20,14 @@ package server import ( "bufio" "encoding/binary" + "fmt" "io" "regexp" "strconv" "strings" "time" - "github.com/pkg/errors" + "errors" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" @@ -69,7 +70,7 @@ func parseSrvr(i io.Reader, logger *logp.Logger) (mapstr.M, string, error) { if strings.Contains(line, "Zxid") { xid, err := parseZxid(line) if err != nil { - err = errors.Wrapf(err, "error parsing 'zxid' line '%s'", line) + err = fmt.Errorf("error parsing 'zxid' line '%s': %w", line, err) logger.Debug(err.Error()) continue } @@ -82,7 +83,7 @@ func parseSrvr(i io.Reader, logger *logp.Logger) (mapstr.M, string, error) { if strings.Contains(line, "Latency") { latency, err := parseLatencyLine(line) if err != nil { - err = errors.Wrapf(err, "error parsing 'latency values' line '%s'", line) + err = fmt.Errorf("error parsing 'latency values' line '%s': %w", line, err) logger.Debug(err.Error()) continue } @@ -95,7 +96,7 @@ func parseSrvr(i io.Reader, logger *logp.Logger) (mapstr.M, string, error) { if strings.Contains(line, "Proposal sizes") { proposalSizes, err := parseProposalSizes(line) if err != nil { - err = errors.Wrapf(err, "error parsing 
'proposal sizes' line '%s'", line) + err = fmt.Errorf("error parsing 'proposal sizes' line '%s': %w", line, err) logger.Debug(err.Error()) continue } @@ -138,7 +139,7 @@ func parseSrvr(i io.Reader, logger *logp.Logger) (mapstr.M, string, error) { val, err := strconv.ParseInt(result[2], 10, 64) if err != nil { - err = errors.Wrapf(err, "error trying to parse value '%s' as int", result[2]) + err = fmt.Errorf("error trying to parse value '%s' as int: %w", result[2], err) logger.Debug(err.Error()) continue } @@ -155,16 +156,16 @@ func parseZxid(line string) (mapstr.M, error) { zxidSplit := strings.Split(line, " ") if len(zxidSplit) < 2 { - return nil, errors.Errorf("less than 2 tokens (%v) after splitting", zxidSplit) + return nil, fmt.Errorf("less than 2 tokens (%v) after splitting", zxidSplit) } zxidString := zxidSplit[1] if len(zxidString) < 3 { - return nil, errors.Errorf("less than 3 characters on '%s'", zxidString) + return nil, fmt.Errorf("less than 3 characters on '%s'", zxidString) } zxid, err := strconv.ParseInt(zxidString[2:], 16, 64) if err != nil { - return nil, errors.Wrapf(err, "error trying to parse value '%s' to int", zxidString[2:]) + return nil, fmt.Errorf("error trying to parse value '%s' to int: %w", zxidString[2:], err) } bs := make([]byte, 8) @@ -185,28 +186,28 @@ func parseProposalSizes(line string) (mapstr.M, error) { initialSplit := strings.Split(line, " ") if len(initialSplit) < 4 { - return nil, errors.Errorf("less than 4 tokens (%v) after splitting", initialSplit) + return nil, fmt.Errorf("less than 4 tokens (%v) after splitting", initialSplit) } values := strings.Split(initialSplit[3], "/") if len(values) < 3 { - return nil, errors.Errorf("less than 3 tokens (%v) after splitting", values) + return nil, fmt.Errorf("less than 3 tokens (%v) after splitting", values) } last, err := strconv.ParseInt(values[0], 10, 64) if err != nil { - return nil, errors.Wrapf(err, "error trying to parse 'last' value as int from '%s'", values[0]) + return nil, fmt.Errorf("error trying to parse 'last' value as int from '%s': %w", values[0], err) } output.Put("last", last) min, err := strconv.ParseInt(values[1], 10, 64) if err != nil { - return nil, errors.Wrapf(err, "error trying to parse 'min' value as int from '%s'", values[1]) + return nil, fmt.Errorf("error trying to parse 'min' value as int from '%s': %w", values[1], err) } output.Put("min", min) max, err := strconv.ParseInt(values[2], 10, 64) if err != nil { - return nil, errors.Wrapf(err, "error trying to parse 'max' value as int from '%s'", values[2]) + return nil, fmt.Errorf("error trying to parse 'max' value as int from '%s': %w", values[2], err) } output.Put("max", max) @@ -218,24 +219,24 @@ func parseLatencyLine(line string) (mapstr.M, error) { values := latencyCapturer.FindStringSubmatch(line) if len(values) < 4 { - return nil, errors.Errorf("less than 4 fields (%v) after splitting", values) + return nil, fmt.Errorf("less than 4 fields (%v) after splitting", values) } min, err := strconv.ParseInt(values[1], 10, 64) if err != nil { - return nil, errors.Wrapf(err, "error trying to parse 'min' value '%s' as int", values[1]) + return nil, fmt.Errorf("error trying to parse 'min' value '%s' as int: %w", values[1], err) } output.Put("min", min) avg, err := strconv.ParseInt(values[2], 10, 64) if err != nil { - return nil, errors.Wrapf(err, "error trying to parse 'avg' value '%s' as int", values[2]) + return nil, fmt.Errorf("error trying to parse 'avg' value '%s' as int: %w", values[2], err) } output.Put("avg", avg) max, err := 
strconv.ParseInt(values[3], 10, 64) if err != nil { - return nil, errors.Wrapf(err, "error trying to parse 'max' value '%s' as int", values[3]) + return nil, fmt.Errorf("error trying to parse 'max' value '%s' as int: %w", values[3], err) } output.Put("max", max) diff --git a/metricbeat/module/zookeeper/server/server.go b/metricbeat/module/zookeeper/server/server.go index 141556a5fb3..86dde3df443 100644 --- a/metricbeat/module/zookeeper/server/server.go +++ b/metricbeat/module/zookeeper/server/server.go @@ -39,7 +39,7 @@ Proposal sizes last/min/max: -3/-999/-1 package server import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -71,18 +71,18 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { outputReader, err := zookeeper.RunCommand("srvr", m.Host(), m.Module().Config().Timeout) if err != nil { - return errors.Wrap(err, "srvr command failed") + return fmt.Errorf("srvr command failed: %w", err) } metricsetFields, version, err := parseSrvr(outputReader, m.Logger()) if err != nil { - return errors.Wrap(err, "error parsing srvr output") + return fmt.Errorf("error parsing srvr output: %w", err) } serverID, err := zookeeper.ServerID(m.Host(), m.Module().Config().Timeout) if err != nil { - return errors.Wrap(err, "error obtaining server id") + return fmt.Errorf("error obtaining server id: %w", err) } event := mb.Event{ diff --git a/metricbeat/module/zookeeper/zookeeper.go b/metricbeat/module/zookeeper/zookeeper.go index 745080d7435..3c8c4573177 100644 --- a/metricbeat/module/zookeeper/zookeeper.go +++ b/metricbeat/module/zookeeper/zookeeper.go @@ -23,13 +23,13 @@ package zookeeper import ( "bufio" "bytes" + "errors" + "fmt" "io" "io/ioutil" "net" "strings" "time" - - "github.com/pkg/errors" ) // RunCommand establishes a TCP connection the ZooKeeper command port that @@ -39,7 +39,7 @@ import ( func RunCommand(command, address string, timeout time.Duration) (io.Reader, error) { conn, err := net.DialTimeout("tcp", address, timeout) if err != nil { - return nil, errors.Wrapf(err, "connection to host '%s' failed", address) + return nil, fmt.Errorf("connection to host '%s' failed: %w", address, err) } defer conn.Close() @@ -52,12 +52,12 @@ func RunCommand(command, address string, timeout time.Duration) (io.Reader, erro // Write four-letter command. 
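Two distinct conversions are mixed in the ZooKeeper hunks above and are worth keeping apart: `errors.Errorf` becomes a plain `fmt.Errorf` with `%v`/`%s` verbs, which creates a new, flat error, while `errors.Wrapf` becomes `fmt.Errorf` with a single trailing `%w` and the cause moved to the last argument. Only the `%w` form keeps the cause matchable down the chain. A short illustrative sketch:

```go
package main

import (
	"errors"
	"fmt"
)

var errBadLine = errors.New("bad line")

func main() {
	// Flat error (was errors.Errorf): %v formats the cause as text only.
	flat := fmt.Errorf("error parsing line %q: %v", "Zxid: zz", errBadLine)

	// Wrapped error (was errors.Wrapf): the cause goes last, behind %w.
	wrapped := fmt.Errorf("error parsing line %q: %w", "Zxid: zz", errBadLine)

	fmt.Println(errors.Is(flat, errBadLine))    // false: the chain was cut
	fmt.Println(errors.Is(wrapped, errBadLine)) // true: %w preserves it
}
```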
_, err = conn.Write([]byte(command)) if err != nil { - return nil, errors.Wrapf(err, "writing command '%s' failed", command) + return nil, fmt.Errorf("writing command '%s' failed: %w", command, err) } result, err := ioutil.ReadAll(conn) if err != nil { - return nil, errors.Wrap(err, "read failed") + return nil, fmt.Errorf("read failed: %w", err) } return bytes.NewReader(result), nil @@ -67,7 +67,7 @@ func RunCommand(command, address string, timeout time.Duration) (io.Reader, erro func ServerID(address string, timeout time.Duration) (string, error) { response, err := RunCommand("conf", address, timeout) if err != nil { - return "", errors.Wrap(err, "execution of 'conf' command failed") + return "", fmt.Errorf("execution of 'conf' command failed: %w", err) } scanner := bufio.NewScanner(response) diff --git a/metricbeat/modules.d/aerospike.yml.disabled b/metricbeat/modules.d/aerospike.yml.disabled index a2fbdf83d2c..5294b90301e 100644 --- a/metricbeat/modules.d/aerospike.yml.disabled +++ b/metricbeat/modules.d/aerospike.yml.disabled @@ -1,5 +1,5 @@ # Module: aerospike -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-aerospike.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-aerospike.html - module: aerospike #metricsets: diff --git a/metricbeat/modules.d/apache.yml.disabled b/metricbeat/modules.d/apache.yml.disabled index 28e34fe429a..9c3adaa97d8 100644 --- a/metricbeat/modules.d/apache.yml.disabled +++ b/metricbeat/modules.d/apache.yml.disabled @@ -1,5 +1,5 @@ # Module: apache -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-apache.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-apache.html - module: apache #metricsets: diff --git a/metricbeat/modules.d/beat-xpack.yml.disabled b/metricbeat/modules.d/beat-xpack.yml.disabled index 0d254a465a1..98cd8c7edef 100644 --- a/metricbeat/modules.d/beat-xpack.yml.disabled +++ b/metricbeat/modules.d/beat-xpack.yml.disabled @@ -1,5 +1,5 @@ # Module: beat -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-beat.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-beat.html - module: beat xpack.enabled: true diff --git a/metricbeat/modules.d/beat.yml.disabled b/metricbeat/modules.d/beat.yml.disabled index af2907f77b4..cb26d83a5cf 100644 --- a/metricbeat/modules.d/beat.yml.disabled +++ b/metricbeat/modules.d/beat.yml.disabled @@ -1,5 +1,5 @@ # Module: beat -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-beat.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-beat.html - module: beat metricsets: diff --git a/metricbeat/modules.d/ceph-mgr.yml.disabled b/metricbeat/modules.d/ceph-mgr.yml.disabled index 84932d3f4c0..9d06114f79f 100644 --- a/metricbeat/modules.d/ceph-mgr.yml.disabled +++ b/metricbeat/modules.d/ceph-mgr.yml.disabled @@ -1,5 +1,5 @@ # Module: ceph -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-ceph.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-ceph.html - module: ceph metricsets: diff --git a/metricbeat/modules.d/ceph.yml.disabled b/metricbeat/modules.d/ceph.yml.disabled index 7e875b274bd..550ea8fe6ea 100644 --- a/metricbeat/modules.d/ceph.yml.disabled +++ b/metricbeat/modules.d/ceph.yml.disabled @@ -1,5 +1,5 @@ # Module: ceph -# Docs: 
https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-ceph.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-ceph.html - module: ceph #metricsets: diff --git a/metricbeat/modules.d/consul.yml.disabled b/metricbeat/modules.d/consul.yml.disabled index d9b9dc5085d..9344dd8c999 100644 --- a/metricbeat/modules.d/consul.yml.disabled +++ b/metricbeat/modules.d/consul.yml.disabled @@ -1,5 +1,5 @@ # Module: consul -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-consul.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-consul.html - module: consul metricsets: diff --git a/metricbeat/modules.d/couchbase.yml.disabled b/metricbeat/modules.d/couchbase.yml.disabled index fbb8a53b4ca..088f98b45c5 100644 --- a/metricbeat/modules.d/couchbase.yml.disabled +++ b/metricbeat/modules.d/couchbase.yml.disabled @@ -1,5 +1,5 @@ # Module: couchbase -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-couchbase.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-couchbase.html - module: couchbase #metricsets: diff --git a/metricbeat/modules.d/couchdb.yml.disabled b/metricbeat/modules.d/couchdb.yml.disabled index 265878fc9db..2a2eb9a5613 100644 --- a/metricbeat/modules.d/couchdb.yml.disabled +++ b/metricbeat/modules.d/couchdb.yml.disabled @@ -1,5 +1,5 @@ # Module: couchdb -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-couchdb.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-couchdb.html - module: couchdb metricsets: ["server"] diff --git a/metricbeat/modules.d/docker.yml.disabled b/metricbeat/modules.d/docker.yml.disabled index bf5950eb6e7..88af5d21288 100644 --- a/metricbeat/modules.d/docker.yml.disabled +++ b/metricbeat/modules.d/docker.yml.disabled @@ -1,5 +1,5 @@ # Module: docker -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-docker.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-docker.html - module: docker #metricsets: diff --git a/metricbeat/modules.d/dropwizard.yml.disabled b/metricbeat/modules.d/dropwizard.yml.disabled index 5baa6349452..1103a314d1d 100644 --- a/metricbeat/modules.d/dropwizard.yml.disabled +++ b/metricbeat/modules.d/dropwizard.yml.disabled @@ -1,5 +1,5 @@ # Module: dropwizard -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-dropwizard.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-dropwizard.html - module: dropwizard #metricsets: diff --git a/metricbeat/modules.d/elasticsearch-xpack.yml.disabled b/metricbeat/modules.d/elasticsearch-xpack.yml.disabled index c7f57b84f54..d89c8b5d29b 100644 --- a/metricbeat/modules.d/elasticsearch-xpack.yml.disabled +++ b/metricbeat/modules.d/elasticsearch-xpack.yml.disabled @@ -1,5 +1,5 @@ # Module: elasticsearch -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-elasticsearch.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-elasticsearch.html - module: elasticsearch xpack.enabled: true diff --git a/metricbeat/modules.d/elasticsearch.yml.disabled b/metricbeat/modules.d/elasticsearch.yml.disabled index 271f927e301..aadd41d5946 100644 --- a/metricbeat/modules.d/elasticsearch.yml.disabled +++ b/metricbeat/modules.d/elasticsearch.yml.disabled @@ -1,5 +1,5 @@ # Module: elasticsearch -# Docs: 
https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-elasticsearch.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-elasticsearch.html - module: elasticsearch #metricsets: diff --git a/metricbeat/modules.d/envoyproxy.yml.disabled b/metricbeat/modules.d/envoyproxy.yml.disabled index 67d638f0b48..ca75daff085 100644 --- a/metricbeat/modules.d/envoyproxy.yml.disabled +++ b/metricbeat/modules.d/envoyproxy.yml.disabled @@ -1,5 +1,5 @@ # Module: envoyproxy -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-envoyproxy.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-envoyproxy.html - module: envoyproxy #metricsets: diff --git a/metricbeat/modules.d/etcd.yml.disabled b/metricbeat/modules.d/etcd.yml.disabled index 5a6fa8cd179..5aa30fb86e7 100644 --- a/metricbeat/modules.d/etcd.yml.disabled +++ b/metricbeat/modules.d/etcd.yml.disabled @@ -1,5 +1,5 @@ # Module: etcd -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-etcd.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-etcd.html - module: etcd #metricsets: diff --git a/metricbeat/modules.d/golang.yml.disabled b/metricbeat/modules.d/golang.yml.disabled index 8bb65e090e1..9f9e5624fa3 100644 --- a/metricbeat/modules.d/golang.yml.disabled +++ b/metricbeat/modules.d/golang.yml.disabled @@ -1,5 +1,5 @@ # Module: golang -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-golang.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-golang.html - module: golang #metricsets: diff --git a/metricbeat/modules.d/graphite.yml.disabled b/metricbeat/modules.d/graphite.yml.disabled index 78f7c32b304..3354715923c 100644 --- a/metricbeat/modules.d/graphite.yml.disabled +++ b/metricbeat/modules.d/graphite.yml.disabled @@ -1,5 +1,5 @@ # Module: graphite -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-graphite.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-graphite.html - module: graphite #metricsets: diff --git a/metricbeat/modules.d/haproxy.yml.disabled b/metricbeat/modules.d/haproxy.yml.disabled index 2c61ee0c55d..e95f687253d 100644 --- a/metricbeat/modules.d/haproxy.yml.disabled +++ b/metricbeat/modules.d/haproxy.yml.disabled @@ -1,5 +1,5 @@ # Module: haproxy -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-haproxy.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-haproxy.html - module: haproxy #metricsets: diff --git a/metricbeat/modules.d/http.yml.disabled b/metricbeat/modules.d/http.yml.disabled index 0ce5b5c0f85..63ebd2ee093 100644 --- a/metricbeat/modules.d/http.yml.disabled +++ b/metricbeat/modules.d/http.yml.disabled @@ -1,5 +1,5 @@ # Module: http -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-http.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-http.html - module: http #metricsets: diff --git a/metricbeat/modules.d/jolokia.yml.disabled b/metricbeat/modules.d/jolokia.yml.disabled index 2190273485f..b58782353ec 100644 --- a/metricbeat/modules.d/jolokia.yml.disabled +++ b/metricbeat/modules.d/jolokia.yml.disabled @@ -1,5 +1,5 @@ # Module: jolokia -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-jolokia.html +# Docs: 
https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-jolokia.html - module: jolokia #metricsets: ["jmx"] diff --git a/metricbeat/modules.d/kafka.yml.disabled b/metricbeat/modules.d/kafka.yml.disabled index 1e0db5d517b..afafa7e5a4c 100644 --- a/metricbeat/modules.d/kafka.yml.disabled +++ b/metricbeat/modules.d/kafka.yml.disabled @@ -1,5 +1,5 @@ # Module: kafka -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-kafka.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-kafka.html # Kafka metrics collected using the Kafka protocol - module: kafka diff --git a/metricbeat/modules.d/kibana-xpack.yml.disabled b/metricbeat/modules.d/kibana-xpack.yml.disabled index dd6b4d939a2..91471a7c212 100644 --- a/metricbeat/modules.d/kibana-xpack.yml.disabled +++ b/metricbeat/modules.d/kibana-xpack.yml.disabled @@ -1,5 +1,5 @@ # Module: kibana -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-kibana.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-kibana.html - module: kibana xpack.enabled: true diff --git a/metricbeat/modules.d/kibana.yml.disabled b/metricbeat/modules.d/kibana.yml.disabled index 78f769cd65e..27ca4b1a05f 100644 --- a/metricbeat/modules.d/kibana.yml.disabled +++ b/metricbeat/modules.d/kibana.yml.disabled @@ -1,5 +1,5 @@ # Module: kibana -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-kibana.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-kibana.html - module: kibana #metricsets: diff --git a/metricbeat/modules.d/kubernetes.yml.disabled b/metricbeat/modules.d/kubernetes.yml.disabled index 02baebb8bb7..23bd210a835 100644 --- a/metricbeat/modules.d/kubernetes.yml.disabled +++ b/metricbeat/modules.d/kubernetes.yml.disabled @@ -1,5 +1,5 @@ # Module: kubernetes -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-kubernetes.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-kubernetes.html # Node metrics, from kubelet: - module: kubernetes diff --git a/metricbeat/modules.d/kvm.yml.disabled b/metricbeat/modules.d/kvm.yml.disabled index 8450e1afc6d..00e06354b0b 100644 --- a/metricbeat/modules.d/kvm.yml.disabled +++ b/metricbeat/modules.d/kvm.yml.disabled @@ -1,5 +1,5 @@ # Module: kvm -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-kvm.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-kvm.html - module: kvm #metricsets: diff --git a/metricbeat/modules.d/linux.yml.disabled b/metricbeat/modules.d/linux.yml.disabled index df7311017bf..2c28e8bcbd0 100644 --- a/metricbeat/modules.d/linux.yml.disabled +++ b/metricbeat/modules.d/linux.yml.disabled @@ -1,5 +1,5 @@ # Module: linux -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-linux.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-linux.html - module: linux period: 10s diff --git a/metricbeat/modules.d/logstash-xpack.yml.disabled b/metricbeat/modules.d/logstash-xpack.yml.disabled index db78289f2a8..b00f4479919 100644 --- a/metricbeat/modules.d/logstash-xpack.yml.disabled +++ b/metricbeat/modules.d/logstash-xpack.yml.disabled @@ -1,5 +1,5 @@ # Module: logstash -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-logstash.html +# Docs: 
https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-logstash.html - module: logstash xpack.enabled: true diff --git a/metricbeat/modules.d/logstash.yml.disabled b/metricbeat/modules.d/logstash.yml.disabled index 72ea8231ff4..90274a3c728 100644 --- a/metricbeat/modules.d/logstash.yml.disabled +++ b/metricbeat/modules.d/logstash.yml.disabled @@ -1,5 +1,5 @@ # Module: logstash -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-logstash.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-logstash.html - module: logstash #metricsets: diff --git a/metricbeat/modules.d/memcached.yml.disabled b/metricbeat/modules.d/memcached.yml.disabled index 7037988cc35..0df976bb0bf 100644 --- a/metricbeat/modules.d/memcached.yml.disabled +++ b/metricbeat/modules.d/memcached.yml.disabled @@ -1,5 +1,5 @@ # Module: memcached -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-memcached.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-memcached.html - module: memcached # metricsets: ["stats"] diff --git a/metricbeat/modules.d/mongodb.yml.disabled b/metricbeat/modules.d/mongodb.yml.disabled index 0d4c26be4a5..48705eae39f 100644 --- a/metricbeat/modules.d/mongodb.yml.disabled +++ b/metricbeat/modules.d/mongodb.yml.disabled @@ -1,5 +1,5 @@ # Module: mongodb -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-mongodb.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-mongodb.html - module: mongodb #metricsets: diff --git a/metricbeat/modules.d/munin.yml.disabled b/metricbeat/modules.d/munin.yml.disabled index d42b1d9919e..803d200561b 100644 --- a/metricbeat/modules.d/munin.yml.disabled +++ b/metricbeat/modules.d/munin.yml.disabled @@ -1,5 +1,5 @@ # Module: munin -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-munin.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-munin.html - module: munin #metricsets: diff --git a/metricbeat/modules.d/mysql.yml.disabled b/metricbeat/modules.d/mysql.yml.disabled index 2b3371b1890..2913f5af8bc 100644 --- a/metricbeat/modules.d/mysql.yml.disabled +++ b/metricbeat/modules.d/mysql.yml.disabled @@ -1,5 +1,5 @@ # Module: mysql -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-mysql.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-mysql.html - module: mysql #metricsets: diff --git a/metricbeat/modules.d/nats.yml.disabled b/metricbeat/modules.d/nats.yml.disabled index d398ac0be43..e1e751cdb49 100644 --- a/metricbeat/modules.d/nats.yml.disabled +++ b/metricbeat/modules.d/nats.yml.disabled @@ -1,5 +1,5 @@ # Module: nats -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-nats.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-nats.html - module: nats metricsets: diff --git a/metricbeat/modules.d/nginx.yml.disabled b/metricbeat/modules.d/nginx.yml.disabled index 786cc90edd6..40c3bea92e5 100644 --- a/metricbeat/modules.d/nginx.yml.disabled +++ b/metricbeat/modules.d/nginx.yml.disabled @@ -1,5 +1,5 @@ # Module: nginx -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-nginx.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-nginx.html - module: nginx #metricsets: diff --git 
a/metricbeat/modules.d/openmetrics.yml.disabled b/metricbeat/modules.d/openmetrics.yml.disabled index ad933acedad..bebd339a1a2 100644 --- a/metricbeat/modules.d/openmetrics.yml.disabled +++ b/metricbeat/modules.d/openmetrics.yml.disabled @@ -1,5 +1,5 @@ # Module: openmetrics -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-openmetrics.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-openmetrics.html - module: openmetrics metricsets: ['collector'] diff --git a/metricbeat/modules.d/php_fpm.yml.disabled b/metricbeat/modules.d/php_fpm.yml.disabled index 08aaa3cc957..0ca2ac5c1df 100644 --- a/metricbeat/modules.d/php_fpm.yml.disabled +++ b/metricbeat/modules.d/php_fpm.yml.disabled @@ -1,5 +1,5 @@ # Module: php_fpm -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-php_fpm.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-php_fpm.html - module: php_fpm #metricsets: diff --git a/metricbeat/modules.d/postgresql.yml.disabled b/metricbeat/modules.d/postgresql.yml.disabled index 14ee2fc7aca..fe2e5858dfb 100644 --- a/metricbeat/modules.d/postgresql.yml.disabled +++ b/metricbeat/modules.d/postgresql.yml.disabled @@ -1,5 +1,5 @@ # Module: postgresql -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-postgresql.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-postgresql.html - module: postgresql #metricsets: diff --git a/metricbeat/modules.d/prometheus.yml.disabled b/metricbeat/modules.d/prometheus.yml.disabled index 82f45573931..f829e3d89da 100644 --- a/metricbeat/modules.d/prometheus.yml.disabled +++ b/metricbeat/modules.d/prometheus.yml.disabled @@ -1,5 +1,5 @@ # Module: prometheus -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-prometheus.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-prometheus.html # Metrics collected from a Prometheus endpoint - module: prometheus diff --git a/metricbeat/modules.d/rabbitmq.yml.disabled b/metricbeat/modules.d/rabbitmq.yml.disabled index ed0d8159571..b6967556f83 100644 --- a/metricbeat/modules.d/rabbitmq.yml.disabled +++ b/metricbeat/modules.d/rabbitmq.yml.disabled @@ -1,5 +1,5 @@ # Module: rabbitmq -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-rabbitmq.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-rabbitmq.html - module: rabbitmq #metricsets: diff --git a/metricbeat/modules.d/redis.yml.disabled b/metricbeat/modules.d/redis.yml.disabled index 303ede6f47d..99a7288e5ee 100644 --- a/metricbeat/modules.d/redis.yml.disabled +++ b/metricbeat/modules.d/redis.yml.disabled @@ -1,5 +1,5 @@ # Module: redis -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-redis.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-redis.html - module: redis #metricsets: diff --git a/metricbeat/modules.d/system.yml b/metricbeat/modules.d/system.yml index 3c511e77439..4123ea00f33 100644 --- a/metricbeat/modules.d/system.yml +++ b/metricbeat/modules.d/system.yml @@ -1,5 +1,5 @@ # Module: system -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-system.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-system.html - module: system period: 10s diff --git a/metricbeat/modules.d/traefik.yml.disabled 
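The long run of `modules.d/*.yml.disabled` hunks above only retargets the doc links from `master` to `main`; the `.disabled` suffix itself is how Metricbeat ships optional module configs, and `metricbeat modules enable <name>` activates one by dropping that suffix. A minimal sketch of that rename, under the assumption that enabling is just the suffix flip; the directory, module name, and `enableModule` helper are illustrative:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// enableModule mimics `metricbeat modules enable <name>`: it renames
// modules.d/<name>.yml.disabled to modules.d/<name>.yml.
func enableModule(modulesDir, name string) error {
	disabled := filepath.Join(modulesDir, name+".yml.disabled")
	enabled := filepath.Join(modulesDir, name+".yml")
	if _, err := os.Stat(disabled); err != nil {
		return fmt.Errorf("module %s is not disabled: %w", name, err)
	}
	return os.Rename(disabled, enabled)
}

func main() {
	if err := enableModule("modules.d", "traefik"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("enabled traefik")
}
```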
b/metricbeat/modules.d/traefik.yml.disabled index 35326a4ec4a..b186538f4e1 100644 --- a/metricbeat/modules.d/traefik.yml.disabled +++ b/metricbeat/modules.d/traefik.yml.disabled @@ -1,5 +1,5 @@ # Module: traefik -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-traefik.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-traefik.html - module: traefik metricsets: ["health"] diff --git a/metricbeat/modules.d/uwsgi.yml.disabled b/metricbeat/modules.d/uwsgi.yml.disabled index f758061b65a..7ac6322064c 100644 --- a/metricbeat/modules.d/uwsgi.yml.disabled +++ b/metricbeat/modules.d/uwsgi.yml.disabled @@ -1,5 +1,5 @@ # Module: uwsgi -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-uwsgi.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-uwsgi.html - module: uwsgi #metricsets: diff --git a/metricbeat/modules.d/vsphere.yml.disabled b/metricbeat/modules.d/vsphere.yml.disabled index c56a9b1ac33..874b3b5b2e8 100644 --- a/metricbeat/modules.d/vsphere.yml.disabled +++ b/metricbeat/modules.d/vsphere.yml.disabled @@ -1,5 +1,5 @@ # Module: vsphere -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-vsphere.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-vsphere.html - module: vsphere #metricsets: diff --git a/metricbeat/modules.d/windows.yml.disabled b/metricbeat/modules.d/windows.yml.disabled index 717e52655a2..afe1af59311 100644 --- a/metricbeat/modules.d/windows.yml.disabled +++ b/metricbeat/modules.d/windows.yml.disabled @@ -1,5 +1,5 @@ # Module: windows -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-windows.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-windows.html - module: windows metricsets: diff --git a/metricbeat/modules.d/zookeeper.yml.disabled b/metricbeat/modules.d/zookeeper.yml.disabled index 7d44efb938e..f8d16c527a6 100644 --- a/metricbeat/modules.d/zookeeper.yml.disabled +++ b/metricbeat/modules.d/zookeeper.yml.disabled @@ -1,5 +1,5 @@ # Module: zookeeper -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-zookeeper.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-zookeeper.html - module: zookeeper #metricsets: diff --git a/metricbeat/scripts/mage/docs_collector.go b/metricbeat/scripts/mage/docs_collector.go index b505db79a69..9d270905e6c 100644 --- a/metricbeat/scripts/mage/docs_collector.go +++ b/metricbeat/scripts/mage/docs_collector.go @@ -29,7 +29,6 @@ import ( "text/template" "github.com/magefile/mage/sh" - "github.com/pkg/errors" "gopkg.in/yaml.v2" "github.com/elastic/beats/v7/dev-tools/mage" @@ -61,12 +60,12 @@ type metricsetData struct { func writeTemplate(filename string, t *template.Template, args interface{}) error { fd, err := os.Create(filename) if err != nil { - return errors.Wrapf(err, "error opening file at %s", filename) + return fmt.Errorf("error opening file at %s: %w", filename, err) } defer fd.Close() err = t.Execute(fd, args) if err != nil { - return errors.Wrap(err, "error executing template") + return fmt.Errorf("error executing template: %w", err) } return nil } @@ -150,7 +149,7 @@ func getDefaultMetricsets() (map[string][]string, error) { for _, dir := range runpaths { rawMap, err := sh.OutCmd("go", append(cmd, dir)...)() if err != nil { - return nil, errors.Wrap(err, "Error running subcommand to get metricsets") + return 
nil, fmt.Errorf("Error running subcommand to get metricsets: %w", err) } var msetMap = make(map[string][]string) err = json.Unmarshal([]byte(rawMap), &msetMap) @@ -169,7 +168,7 @@ func getDefaultMetricsets() (map[string][]string, error) { func loadModuleFields(file string) (moduleData, error) { fd, err := ioutil.ReadFile(file) if err != nil { - return moduleData{}, errors.Wrap(err, "failed to read from spec file") + return moduleData{}, fmt.Errorf("failed to read from spec file: %w", err) } // Cheat and use the same struct. var mod []moduleData @@ -180,7 +179,7 @@ func loadModuleFields(file string) (moduleData, error) { rel, err := getRelease(module.Release) if err != nil { - return mod[0], errors.Wrapf(err, "file %s is missing a release string", file) + return mod[0], fmt.Errorf("file %s is missing a release string: %w", file, err) } module.Release = rel @@ -191,7 +190,7 @@ func loadModuleFields(file string) (moduleData, error) { func getReleaseState(metricsetPath string) (string, error) { raw, err := ioutil.ReadFile(metricsetPath) if err != nil { - return "", errors.Wrap(err, "failed to read from spec file") + return "", fmt.Errorf("failed to read from spec file: %w", err) } type metricset struct { @@ -204,7 +203,7 @@ func getReleaseState(metricsetPath string) (string, error) { relString, err := getRelease(rel[0].Release) if err != nil { - return "", errors.Wrapf(err, "metricset %s is missing a release tag", metricsetPath) + return "", fmt.Errorf("metricset %s is missing a release tag: %w", metricsetPath, err) } return relString, nil } @@ -298,7 +297,7 @@ func gatherMetricsets(modulePath string, moduleName string, defaultMetricSets [] func gatherData(modules []string) ([]moduleData, error) { defmset, err := getDefaultMetricsets() if err != nil { - return nil, errors.Wrap(err, "error getting default metricsets") + return nil, fmt.Errorf("error getting default metricsets: %w", err) } moduleList := make([]moduleData, 0) //iterate over all the modules, checking to make sure we have an asciidoc file @@ -377,7 +376,7 @@ func writeMetricsetDocs(modules []moduleData, t *template.Template) error { filename := mage.OSSBeatDir("docs", "modules", mod.Base, fmt.Sprintf("%s.asciidoc", metricset.Title)) err := writeTemplate(filename, t.Lookup("metricsetDoc.tmpl"), modData) if err != nil { - return errors.Wrapf(err, "error opening file at %s", filename) + return fmt.Errorf("error opening file at %s: %w", filename, err) } } // end metricset loop } // end module loop @@ -403,25 +402,25 @@ func writeDocs(modules []moduleData) error { tmplList := template.New("moduleList").Option("missingkey=error").Funcs(funcMap) beatPath, err := mage.ElasticBeatsDir() if err != nil { - return errors.Wrap(err, "error finding beats dir") + return fmt.Errorf("error finding beats dir: %w", err) } tmplList, err = tmplList.ParseGlob(path.Join(beatPath, "metricbeat/scripts/mage/template/*.tmpl")) if err != nil { - return errors.Wrap(err, "error parsing template files") + return fmt.Errorf("error parsing template files: %w", err) } err = writeModuleDocs(modules, tmplList) if err != nil { - return errors.Wrap(err, "error writing module docs") + return fmt.Errorf("error writing module docs: %w", err) } err = writeMetricsetDocs(modules, tmplList) if err != nil { - return errors.Wrap(err, "error writing metricset docs") + return fmt.Errorf("error writing metricset docs: %w", err) } err = writeModuleList(modules, tmplList) if err != nil { - return errors.Wrap(err, "error writing module list") + return fmt.Errorf("error writing module 
list: %w", err) } return nil diff --git a/metricbeat/scripts/mage/package.go b/metricbeat/scripts/mage/package.go index 6ba347bf6f8..e206881dd3c 100644 --- a/metricbeat/scripts/mage/package.go +++ b/metricbeat/scripts/mage/package.go @@ -26,7 +26,6 @@ import ( "strings" "github.com/magefile/mage/mg" - "github.com/pkg/errors" devtools "github.com/elastic/beats/v7/dev-tools/mage" ) @@ -58,7 +57,7 @@ func CustomizePackaging() { Modules: true, Dep: func(spec devtools.PackageSpec) error { if err := devtools.Copy(dirModulesDGenerated, spec.MustExpand("{{.PackageDir}}/modules.d")); err != nil { - return errors.Wrap(err, "failed to copy modules.d dir") + return fmt.Errorf("failed to copy modules.d dir: %w", err) } return devtools.FindReplace( @@ -73,7 +72,7 @@ func CustomizePackaging() { err := devtools.Copy("metricbeat.reference.yml", spec.MustExpand("{{.PackageDir}}/metricbeat.reference.yml")) if err != nil { - return errors.Wrap(err, "failed to copy reference config") + return fmt.Errorf("failed to copy reference config: %w", err) } return devtools.FindReplace( @@ -96,7 +95,7 @@ func CustomizePackaging() { case devtools.Deb, devtools.RPM: args.Spec.Files["/etc/{{.BeatName}}/"+modulesDTarget] = modulesD default: - panic(errors.Errorf("unhandled package type: %v", pkgType)) + panic(fmt.Errorf("unhandled package type: %v", pkgType)) } } } @@ -171,7 +170,7 @@ func GenerateDirModulesD() error { docBranch, err := devtools.BeatDocBranch() if err != nil { - errors.Wrap(err, "failed to get doc branch") + return fmt.Errorf("failed to get doc branch: %w", err) } mode := 0644 @@ -297,21 +296,24 @@ func moduleConfigParts(f string) (moduleName string, configName string, ok bool) func copyWithHeader(header, src, dst string, mode os.FileMode) error { dstFile, err := os.OpenFile(devtools.CreateDir(dst), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, mode&os.ModePerm) if err != nil { - return errors.Wrap(err, "failed to open copy destination") + return fmt.Errorf("failed to open copy destination: %w", err) } defer dstFile.Close() _, err = io.WriteString(dstFile, header+"\n\n") if err != nil { - return errors.Wrap(err, "failed to write header") + return fmt.Errorf("failed to write header: %w", err) } srcFile, err := os.Open(src) if err != nil { - return errors.Wrap(err, "failed to open copy source") + return fmt.Errorf("failed to open copy source: %w", err) } defer srcFile.Close() _, err = io.Copy(dstFile, srcFile) - return errors.Wrap(err, "failed to copy file") + if err != nil { + return fmt.Errorf("failed to copy file: %w", err) + } + return nil } diff --git a/metricbeat/scripts/module/metricset/metricset.go.tmpl b/metricbeat/scripts/module/metricset/metricset.go.tmpl index b79ff2f19f2..37e4a7b6b4d 100644 --- a/metricbeat/scripts/module/metricset/metricset.go.tmpl +++ b/metricbeat/scripts/module/metricset/metricset.go.tmpl @@ -8,7 +8,7 @@ import ( // init registers the MetricSet with the central registry as soon as the program // starts. The New function will be called later to instantiate an instance of -// the MetricSet for each host defined in the module's configuration. After the +// the MetricSet for each host is defined in the module's configuration. After the // MetricSet has been created then Fetch will begin to be called periodically. 
func init() { mb.Registry.MustAddMetricSet("{module}", "{metricset}", New) @@ -39,7 +39,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { }, nil } -// Fetch methods implements the data gathering and data conversion to the right +// Fetch method implements the data gathering and data conversion to the right // format. It publishes the event which is then forwarded to the output. In case // of an error set the Error field of mb.Event or simply call report.Error(). func (m *MetricSet) Fetch(report mb.ReporterV2) error { diff --git a/packetbeat/Dockerfile b/packetbeat/Dockerfile index 4d6e1f522e0..11384f61683 100644 --- a/packetbeat/Dockerfile +++ b/packetbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19.10 +FROM golang:1.19.12 RUN \ apt-get update \ @@ -12,7 +12,7 @@ RUN \ && rm -rf /var/lib/apt/lists/* # Use a virtualenv to avoid the PEP668 "externally managed environment" error caused by conflicts -# with the system Python installation. golang:1.19.10 uses Debian 12 which now enforces PEP668. +# with the system Python installation. golang:1.19.12 uses Debian 12 which now enforces PEP668. ENV VIRTUAL_ENV=/opt/venv RUN python3 -m venv $VIRTUAL_ENV ENV PATH="$VIRTUAL_ENV/bin:$PATH" diff --git a/packetbeat/_meta/config/beat.reference.yml.tmpl b/packetbeat/_meta/config/beat.reference.yml.tmpl index 3a6319390d0..649ec0e8dee 100644 --- a/packetbeat/_meta/config/beat.reference.yml.tmpl +++ b/packetbeat/_meta/config/beat.reference.yml.tmpl @@ -15,7 +15,7 @@ # to sniff on the device carrying the default route. packetbeat.interfaces.device: {{ call .device .GOOS }} -# The network CIDR blocks that are considered "internal" networks for +# Define the network CIDR blocks that are considered "internal" networks for # the purpose of network perimeter boundary classification. The valid # values for internal_networks are the same as those that can be used # with processor network conditions. @@ -39,7 +39,7 @@ packetbeat.interfaces.internal_networks: #packetbeat.interfaces.snaplen: 65535 # The maximum size of the shared memory buffer to use between the kernel and -# user space. A bigger buffer usually results in lower CPU usage, but consumes +# user space. A bigger buffer usually results in lower CPU usage but consumes # more memory. This setting is only available for the af_packet sniffer type. # The default is 30 MB. #packetbeat.interfaces.buffer_size_mb: 30 @@ -58,23 +58,23 @@ packetbeat.interfaces.internal_networks: # The value must be between 0 and 65535. By default, no value is set. # # This is only available on Linux and requires using `type: af_packet`. Each process -# must be running in same network namespace. All processes must use the same +# must be running in the same network namespace. All processes must use the same # interface settings. You must take responsibility for running multiple instances # of Packetbeat. #packetbeat.interfaces.fanout_group: ~ # Packetbeat automatically generates a BPF for capturing only the traffic on -# ports where it expects to find known protocols. Use this settings to tell +# ports where it expects to find known protocols. Use this setting to tell # Packetbeat to generate a BPF filter that accepts VLAN tags. #packetbeat.interfaces.with_vlans: true # Use this setting to override the automatically generated BPF filter. #packetbeat.interfaces.bpf_filter: -# With `auto_promisc_mode` Packetbeat puts interface in promiscuous mode automatically on startup. +# With `auto_promisc_mode` Packetbeat puts the interface in promiscuous mode automatically on startup.
# This option does not work with `any` interface device. # The default option is false and requires manual set-up of promiscuous mode. -# Warning: under some circumstances (e.g beat crash) promiscuous mode +# Warning: under some circumstances (e.g., beat crash) promiscuous mode # can stay enabled even after beat is shut down. #packetbeat.interfaces.auto_promisc_mode: true @@ -132,7 +132,7 @@ packetbeat.protocols: # Default: false #parse_arguments: false - # Hide all methods relative to connection negotiation between server and + # Hide all methods relative to connection negotiation between the server and # client. # Default: true #hide_connection_information: true @@ -164,7 +164,7 @@ packetbeat.protocols: #send_request: true # If this option is enabled, the raw message of the response (`cassandra_request.request_headers` field) - # is included in published events. The default is true. enable `send_request` first before enable this option. + # is included in published events. The default is true. Enable `send_request` first before enabling this option. #send_request_header: true # If this option is enabled, the raw message of the response (`cassandra_response` field) @@ -172,7 +172,7 @@ packetbeat.protocols: #send_response: true # If this option is enabled, the raw message of the response (`cassandra_response.response_headers` field) - # is included in published events. The default is true. enable `send_response` first before enable this option. + # is included in published events. The default is true. Enable `send_response` first before enabling this option. #send_response_header: true # Set to true to publish fields with null values in events. @@ -239,8 +239,8 @@ packetbeat.protocols: # the HTTP protocol by commenting out the list of ports. ports: [80, 8080, 8000, 5000, 8002] - # Uncomment the following to hide certain parameters in URL or forms attached - # to HTTP requests. The names of the parameters are case insensitive. + # Uncomment the following to hide certain parameters in the URL or forms attached + # to HTTP requests. The names of the parameters are case-insensitive. # The value of the parameters will be replaced with the 'xxxxx' string. # This is generally useful for avoiding storing user passwords or other # sensitive information. diff --git a/packetbeat/_meta/config/windows_npcap.yml.tmpl b/packetbeat/_meta/config/windows_npcap.yml.tmpl index 62605c20250..23647cc6d01 100644 --- a/packetbeat/_meta/config/windows_npcap.yml.tmpl +++ b/packetbeat/_meta/config/windows_npcap.yml.tmpl @@ -8,6 +8,6 @@ #packetbeat.npcap: # # If a specific local version of Npcap is required, installation by packetbeat # # can be blocked by setting never_install to true. No action is taken if this -# # option is set to true. +# # option is set to true and an Npcap installation is already present. # never_install: false {{- end -}} diff --git a/packetbeat/beater/install_npcap.go b/packetbeat/beater/install_npcap.go index 370f7712e97..c1413fdb6d9 100644 --- a/packetbeat/beater/install_npcap.go +++ b/packetbeat/beater/install_npcap.go @@ -23,16 +23,23 @@ import ( "os" "path/filepath" "runtime" + "strings" + "sync" "time" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/packetbeat/npcap" + conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" ) const installTimeout = 120 * time.Second -func installNpcap(b *beat.Beat) error { +// muInstall protects use of npcap.Installer.
The only writes to npcap.Installer +// are here and during init in x-pack/packetbeat/npcap/npcap_windows.go. +var muInstall sync.Mutex + +func installNpcap(b *beat.Beat, cfg *conf.C) error { if !b.Info.ElasticLicensed { return nil } @@ -54,19 +61,29 @@ func installNpcap(b *beat.Beat) error { return nil } - canInstall, err := canInstallNpcap(b) - if err != nil { - return err - } log := logp.NewLogger("npcap_install") - if !canInstall { - log.Warn("npcap installation/upgrade disabled by user") - return nil + // Only check whether we have been requested to never_install if there + // is already an Npcap installation present. This check should not be + // necessary, but the start-up logic of packetbeat is tightly coupled to + // the presence of a backing sniffer, and the changes to modify this + // behaviour are non-trivial, so just avoid the issue. + isInstalled := strings.HasPrefix(npcap.Version(), "Npcap version") + if isInstalled { + canInstall, err := canInstallNpcap(b, cfg, log) + if err != nil { + return err + } + if !canInstall { + log.Warn("npcap installation/upgrade disabled by user") + return nil + } } ctx, cancel := context.WithTimeout(context.Background(), installTimeout) defer cancel() + muInstall.Lock() + defer muInstall.Unlock() if npcap.Installer == nil { return nil } @@ -95,9 +112,10 @@ func installNpcap(b *beat.Beat) error { // configurations from agent normalised to the internal packetbeat format by this point. // In the case that the beat is managed, any data stream that has npcap.never_install // set to true will result in a block on the installation. -func canInstallNpcap(b *beat.Beat) (bool, error) { +func canInstallNpcap(b *beat.Beat, rawcfg *conf.C, log *logp.Logger) (bool, error) { type npcapInstallCfg struct { - NeverInstall bool `config:"npcap.never_install"` + Type string `config:"type"` + NeverInstall bool `config:"npcap.never_install"` } // Agent managed case. @@ -105,12 +123,19 @@ func canInstallNpcap(b *beat.Beat) (bool, error) { var cfg struct { Streams []npcapInstallCfg `config:"streams"` } - err := b.BeatConfig.Unpack(&cfg) + err := rawcfg.Unpack(&cfg) if err != nil { return false, fmt.Errorf("failed to unpack npcap config from agent configuration: %w", err) } + if len(cfg.Streams) == 0 { + // We have no stream to monitor, so we don't need to install + // anything. We may be in the middle of a config check. + log.Debug("cannot install because no stream is configured") + return false, nil + } for _, c := range cfg.Streams { if c.NeverInstall { + log.Debugf("cannot install because %s has never_install set to true", c.Type) return false, nil } } @@ -119,9 +144,12 @@ func canInstallNpcap(b *beat.Beat) (bool, error) { // Packetbeat case.
var cfg npcapInstallCfg - err := b.BeatConfig.Unpack(&cfg) + err := rawcfg.Unpack(&cfg) if err != nil { return false, fmt.Errorf("failed to unpack npcap config from packetbeat configuration: %w", err) } + if cfg.NeverInstall { + log.Debugf("cannot install because %s has never_install set to true", cfg.Type) + } return !cfg.NeverInstall, err } diff --git a/packetbeat/beater/install_npcap_test.go b/packetbeat/beater/install_npcap_test.go index 7a888b4e0c8..3d8c678edaf 100644 --- a/packetbeat/beater/install_npcap_test.go +++ b/packetbeat/beater/install_npcap_test.go @@ -23,6 +23,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/management" "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" ) var canInstallNpcapTests = []struct { @@ -210,10 +211,9 @@ func TestCanInstallNpcap(t *testing.T) { t.Fatalf("unexpected error: %v", err) } b := &beat.Beat{ - BeatConfig: cfg, - Manager: boolManager{managed: test.managed}, + Manager: boolManager{managed: test.managed}, } - got, err := canInstallNpcap(b) + got, err := canInstallNpcap(b, cfg, logp.NewLogger("npcap_install_test")) if err != nil { t.Errorf("unexpected error from canInstallNpcap: %v", err) } diff --git a/packetbeat/beater/packetbeat.go b/packetbeat/beater/packetbeat.go index fbc0e1c1fb9..725f3eebc33 100644 --- a/packetbeat/beater/packetbeat.go +++ b/packetbeat/beater/packetbeat.go @@ -93,14 +93,6 @@ func New(b *beat.Beat, rawConfig *conf.C) (beat.Beater, error) { configurator = initialConfig().FromStatic } - // Install Npcap if needed. This need to happen before any other - // work on Windows, including config checking, because that involves - // probing interfaces. - err := installNpcap(b) - if err != nil { - return nil, err - } - factory := newProcessorFactory(b.Info.Name, make(chan error, maxSniffers), b, configurator) if err := factory.CheckConfig(rawConfig); err != nil { return nil, err @@ -133,6 +125,18 @@ func (pb *packetbeat) Run(b *beat.Beat) error { } } + if b.Manager != nil { + b.Manager.RegisterDiagnosticHook("input_metrics", "Metrics from active inputs.", + "input_metrics.json", "application/json", func() []byte { + data, err := inputmon.MetricSnapshotJSON() + if err != nil { + logp.L().Warnw("Failed to collect input metric snapshot for Agent diagnostics.", "error", err) + return []byte(err.Error()) + } + return data + }) + } + if !b.Manager.Enabled() { return pb.runStatic(b, pb.factory) } diff --git a/packetbeat/beater/processor.go b/packetbeat/beater/processor.go index 494b8f890b6..513dbe2871c 100644 --- a/packetbeat/beater/processor.go +++ b/packetbeat/beater/processor.go @@ -123,6 +123,24 @@ func (p *processorFactory) Create(pipeline beat.PipelineConnector, cfg *conf.C) logp.Err("Failed to generate ID from config: %v, %v", err, config) return nil, err } + if len(config.Interfaces) != 0 { + // Install Npcap if needed. This needs to happen before any other + // work on Windows, including config checking, because that involves + // probing interfaces. + // + // Users may block installation of Npcap, so we defer the install + // until we have a configuration that will tell us if it has been + // blocked. To do this we must have a valid config. + // + // When Packetbeat is managed by fleet we will only have this if + // Create has been called via the agent Reload process. We take + // the opportunity to not install the DLL if there is no configured + // interface. 
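The deferral above concentrates the Windows-only install decision in one place. The following is a hedged, self-contained sketch of that gating logic only (isInstalled, neverInstall, and doInstall stand in for the npcap.Version probe, canInstallNpcap, and the real installer; it is not the shipped implementation):

package main

import (
	"fmt"
	"sync"
)

// A mutex mirrors muInstall above: concurrent Create calls must not race
// on the installer.
var muInstall sync.Mutex

// maybeInstall sketches the gating: never_install is only honoured when an
// Npcap installation is already present, because with no Npcap at all the
// sniffer cannot start, so the bundled library is installed regardless.
func maybeInstall(isInstalled, neverInstall bool, doInstall func() error) error {
	if isInstalled && neverInstall {
		return nil // the user opted out of upgrades; keep the local version
	}
	muInstall.Lock()
	defer muInstall.Unlock()
	return doInstall()
}

func main() {
	install := func() error { fmt.Println("installing bundled Npcap"); return nil }
	_ = maybeInstall(true, true, install)  // no-op: upgrade blocked by the user
	_ = maybeInstall(false, true, install) // installs: nothing is present yet
}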
+ err := installNpcap(p.beat, cfg) + if err != nil { + return nil, err + } + } publisher, err := publish.NewTransactionPublisher( p.beat.Info.Name, diff --git a/packetbeat/docs/packetbeat-options.asciidoc b/packetbeat/docs/packetbeat-options.asciidoc index 5266dac8d33..4a74dd5593e 100644 --- a/packetbeat/docs/packetbeat-options.asciidoc +++ b/packetbeat/docs/packetbeat-options.asciidoc @@ -61,7 +61,7 @@ On Windows {beatname} requires an Npcap DLL installation. This is provided by {b for users of the Elastic Licenced version. In some cases users may wish to use their own installed version. In order to do this the `packetbeat.npcap.never_install` option can be used. Setting this option to `true` will not attempt to install the -bundled Npcap library on start-up. +bundled Npcap library on start-up when an Npcap installation is already present; if no Npcap is installed, the bundled library is installed regardless. [source,yaml] ------------------------------------------------------------------------------ diff --git a/packetbeat/docs/troubleshooting.asciidoc b/packetbeat/docs/troubleshooting.asciidoc index f6a3fb23be3..ef210371ad0 100644 --- a/packetbeat/docs/troubleshooting.asciidoc +++ b/packetbeat/docs/troubleshooting.asciidoc @@ -8,6 +8,7 @@ following tips: * <> * <> +* <> * <> * <> @@ -29,6 +30,18 @@ include::{libbeat-dir}/debugging.asciidoc[] //sets block macro for recording-trace content included in next section +//sets block macro for metrics-in-logs.asciidoc included in next section + +[id="understand-{beatname_lc}-logs"] +[role="xpack"] +== Understand metrics in {beatname_uc} logs + +++++ +Understand logged metrics +++++ + +include::{libbeat-dir}/metrics-in-logs.asciidoc[] + [[recording-trace]] == Record a trace diff --git a/packetbeat/packetbeat.reference.yml b/packetbeat/packetbeat.reference.yml index 736c0754893..7442b7f6a0f 100644 --- a/packetbeat/packetbeat.reference.yml +++ b/packetbeat/packetbeat.reference.yml @@ -15,7 +15,7 @@ # to sniff on the device carrying the default route. packetbeat.interfaces.device: any -# The network CIDR blocks that are considered "internal" networks for +# Define the network CIDR blocks that are considered "internal" networks for # the purpose of network perimeter boundary classification. The valid # values for internal_networks are the same as those that can be used # with processor network conditions. @@ -39,7 +39,7 @@ packetbeat.interfaces.internal_networks: #packetbeat.interfaces.snaplen: 65535 # The maximum size of the shared memory buffer to use between the kernel and -# user space. A bigger buffer usually results in lower CPU usage, but consumes +# user space. A bigger buffer usually results in lower CPU usage but consumes # more memory. This setting is only available for the af_packet sniffer type. # The default is 30 MB. #packetbeat.interfaces.buffer_size_mb: 30 @@ -58,23 +58,23 @@ packetbeat.interfaces.internal_networks: # The value must be between 0 and 65535. By default, no value is set. # # This is only available on Linux and requires using `type: af_packet`. Each process -# must be running in same network namespace. All processes must use the same +# must be running in the same network namespace. All processes must use the same # interface settings. You must take responsibility for running multiple instances # of Packetbeat. #packetbeat.interfaces.fanout_group: ~ # Packetbeat automatically generates a BPF for capturing only the traffic on -# ports where it expects to find known protocols.
Use this setting to tell # Packetbeat to generate a BPF filter that accepts VLAN tags. #packetbeat.interfaces.with_vlans: true # Use this setting to override the automatically generated BPF filter. #packetbeat.interfaces.bpf_filter: -# With `auto_promisc_mode` Packetbeat puts interface in promiscuous mode automatically on startup. +# With `auto_promisc_mode` Packetbeat puts the interface in promiscuous mode automatically on startup. # This option does not work with `any` interface device. # The default option is false and requires manual set-up of promiscuous mode. -# Warning: under some circumstances (e.g beat crash) promiscuous mode +# Warning: under some circumstances (e.g., beat crash) promiscuous mode # can stay enabled even after beat is shut down. #packetbeat.interfaces.auto_promisc_mode: true @@ -130,7 +130,7 @@ packetbeat.protocols: # Default: false #parse_arguments: false - # Hide all methods relative to connection negotiation between server and + # Hide all methods relative to connection negotiation between the server and # client. # Default: true #hide_connection_information: true @@ -162,7 +162,7 @@ packetbeat.protocols: #send_request: true # If this option is enabled, the raw message of the response (`cassandra_request.request_headers` field) - # is included in published events. The default is true. enable `send_request` first before enable this option. + # is included in published events. The default is true. Enable `send_request` first before enabling this option. #send_request_header: true # If this option is enabled, the raw message of the response (`cassandra_response` field) @@ -170,7 +170,7 @@ packetbeat.protocols: #send_response: true # If this option is enabled, the raw message of the response (`cassandra_response.response_headers` field) - # is included in published events. The default is true. enable `send_response` first before enable this option. + # is included in published events. The default is true. Enable `send_response` first before enabling this option. #send_response_header: true # Set to true to publish fields with null values in events. @@ -237,8 +237,8 @@ packetbeat.protocols: # the HTTP protocol by commenting out the list of ports. ports: [80, 8080, 8000, 5000, 8002] - # Uncomment the following to hide certain parameters in URL or forms attached - # to HTTP requests. The names of the parameters are case insensitive. + # Uncomment the following to hide certain parameters in the URL or forms attached + # to HTTP requests. The names of the parameters are case-insensitive. # The value of the parameters will be replaced with the 'xxxxx' string. # This is generally useful for avoiding storing user passwords or other # sensitive information. @@ -642,10 +642,10 @@ packetbeat.ignore_outgoing: false # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. +# If this option is not defined, the hostname is used. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their own `tags` field with each # transaction published. Tags make it easy to group servers by different # logical properties.
#tags: ["service-X", "web-tier"] @@ -657,7 +657,7 @@ packetbeat.ignore_outgoing: false # env: staging # If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields +# fields in the output document instead of being grouped under a field # sub-dictionary. Default is false. #fields_under_root: false @@ -669,7 +669,7 @@ packetbeat.ignore_outgoing: false #queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs - # bulk_max_size) to the output, the moment the output is ready to server + # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events. #mem: # Max number of events the queue can buffer. @@ -721,7 +721,7 @@ packetbeat.ignore_outgoing: false # length of its retry interval each time, up to this maximum. #max_retry_interval: 30s -# Sets the maximum number of CPUs that can be executing simultaneously. The +# Sets the maximum number of CPUs that can be executed simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: @@ -842,7 +842,7 @@ packetbeat.ignore_outgoing: false # ignore_missing: false # fail_on_error: true # -# The following example copies the value of message to message_copied +# The following example copies the value of the message to message_copied # #processors: # - copy_fields: @@ -852,7 +852,7 @@ packetbeat.ignore_outgoing: false # fail_on_error: true # ignore_missing: false # -# The following example truncates the value of message to 1024 bytes +# The following example truncates the value of the message to 1024 bytes # #processors: # - truncate_fields: @@ -949,7 +949,7 @@ output.elasticsearch: # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. #index: "packetbeat-%{[agent.version]}" - # Optional ingest pipeline. By default no pipeline will be used. + # Optional ingest pipeline. By default, no pipeline will be used. #pipeline: "" # Optional HTTP path @@ -1680,14 +1680,14 @@ output.elasticsearch: # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag or the `setup` command. +# options here or by using the `-setup` CLI flag or the `setup` command. #setup.dashboards.enabled: false # The directory from where to read the dashboards. The default is the `kibana` # folder in the home path. #setup.dashboards.directory: ${path.home}/kibana -# The URL from where to download the dashboards archive. It is used instead of +# The URL from where to download the dashboard archive. It is used instead of # the directory if it has a value. #setup.dashboards.url: @@ -1784,7 +1784,7 @@ setup.template.settings: # Configure index lifecycle management (ILM) to manage the backing indices # of your data streams. -# Enable ILM support. Valid values are true, false. +# Enable ILM support. Valid values are true, or false. #setup.ilm.enabled: true # Set the lifecycle policy name. The default policy name is @@ -1939,25 +1939,25 @@ logging.files: # The name of the files where the logs are written to. #name: packetbeat - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. 
#rotateeverybytes: 10485760 # = 10MB - # Number of rotated log files to keep. Oldest files will be deleted first. + # Number of rotated log files to keep. The oldest files will be deleted first. #keepfiles: 7 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. #permissions: 0600 - # Enable log file rotation on time intervals in addition to size-based rotation. + # Enable log file rotation on time intervals in addition to the size-based rotation. # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h # are boundary-aligned with minutes, hours, days, weeks, months, and years as # reported by the local system clock. All other intervals are calculated from the # Unix epoch. Defaults to disabled. #interval: 0 - # Rotate existing logs on startup rather than appending to the existing + # Rotate existing logs on startup rather than appending them to the existing # file. Defaults to true. # rotateonstartup: true @@ -1985,7 +1985,7 @@ logging.files: # Array of hosts to connect to. # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # In case you specify an additional path, the scheme is required: http://localhost:9200/path # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 #hosts: ["localhost:9200"] @@ -2032,7 +2032,7 @@ logging.files: # Elasticsearch after a network error. The default is 60s. #backoff.max: 60s - # Configure HTTP request timeout before failing an request to Elasticsearch. + # Configure HTTP request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -2129,15 +2129,15 @@ logging.files: # =============================== HTTP Endpoint ================================ -# Each beat can expose internal metrics through a HTTP endpoint. For security +# Each beat can expose internal metrics through an HTTP endpoint. For security # reasons the endpoint is disabled by default. This feature is currently experimental. -# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output # append ?pretty to the URL. # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe. # When using IP addresses, it is recommended to only use localhost. #http.host: localhost @@ -2147,7 +2147,7 @@ logging.files: # Define which user should be owning the named pipe. #http.named_pipe.user: -# Define which the permissions that should be applied to the named pipe, use the Security +# Define which permissions should be applied to the named pipe, use the Security # Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with # `http.user`. #http.named_pipe.security_descriptor: diff --git a/packetbeat/packetbeat.yml b/packetbeat/packetbeat.yml index 34f4f370875..cfe15388a35 100644 --- a/packetbeat/packetbeat.yml +++ b/packetbeat/packetbeat.yml @@ -168,8 +168,8 @@ setup.template.settings: # options here or by using the `setup` command. #setup.dashboards.enabled: false -# The URL from where to download the dashboards archive. 
By default this URL -has a value which is computed based on the Beat name and version. For released +# The URL from where to download the dashboard archive. By default, this URL +# has a value that is computed based on the Beat name and version. For released # versions, this URL points to the dashboard archive on the artifacts.elastic.co # website. #setup.dashboards.url: @@ -262,7 +262,7 @@ processors: #logging.level: debug # At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", +# To enable all selectors, use ["*"]. Examples of other selectors are "beat", # "publisher", "service". #logging.selectors: ["*"] @@ -280,7 +280,7 @@ processors: #monitoring.cluster_uuid: # Uncomment to send the metrics to Elasticsearch. Most settings from the -# Elasticsearch output are accepted here as well. +# Elasticsearch output are accepted here as well. # Note that the settings should point to your Elasticsearch *monitoring* cluster. # Any setting that is not set is automatically inherited from the Elasticsearch # output configuration, so if you have the Elasticsearch output configured such diff --git a/packetbeat/pb/event.go b/packetbeat/pb/event.go index 3c38f98729b..44cb7c81d92 100644 --- a/packetbeat/pb/event.go +++ b/packetbeat/pb/event.go @@ -24,8 +24,6 @@ import ( "strings" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/flowhash" @@ -112,7 +110,7 @@ func GetFields(m mapstr.M) (*Fields, error) { fields, ok := v.(*Fields) if !ok { - return nil, errors.Errorf("%v must be a *types.Fields, but is %T", FieldsKey, fields) + return nil, fmt.Errorf("%v must be a *types.Fields, but is %T", FieldsKey, v) } return fields, nil } diff --git a/packetbeat/protos/http/decode.go b/packetbeat/protos/http/decode.go index 34301955e9b..dc113f53de9 100644 --- a/packetbeat/protos/http/decode.go +++ b/packetbeat/protos/http/decode.go @@ -23,7 +23,7 @@ import ( "compress/gzip" "io" - "github.com/pkg/errors" + "errors" ) var ( diff --git a/packetbeat/protos/http/http.go b/packetbeat/protos/http/http.go index 91706a5e30d..6c291e8e83a 100644 --- a/packetbeat/protos/http/http.go +++ b/packetbeat/protos/http/http.go @@ -27,8 +27,6 @@ import ( "strings" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/ecs" @@ -708,7 +706,7 @@ func decodeBody(body []byte, encodings []string, maxSize int) (result []byte, er if idx != 0 { body = nil } - return body, errors.Wrapf(err, "unable to decode body using %s encoding", format) + return body, fmt.Errorf("unable to decode body using %s encoding: %w", format, err) } } return body, nil diff --git a/packetbeat/publish/publish.go b/packetbeat/publish/publish.go index 9c64727de35..257ad027a70 100644 --- a/packetbeat/publish/publish.go +++ b/packetbeat/publish/publish.go @@ -18,10 +18,9 @@ package publish import ( + "errors" "net" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/processors" diff --git a/packetbeat/scripts/mage/package.go b/packetbeat/scripts/mage/package.go index fdbb2b69bb5..76373df2b73 100644 --- a/packetbeat/scripts/mage/package.go +++ b/packetbeat/scripts/mage/package.go @@ -18,11 +18,10 @@ package mage import ( + "fmt" "os" "path/filepath" -
"github.com/pkg/errors" - devtools "github.com/elastic/beats/v7/dev-tools/mage" ) @@ -93,7 +92,7 @@ func CustomizePackaging() { case devtools.Docker: args.Spec.ExtraVar("linux_capabilities", "cap_net_raw,cap_net_admin+eip") default: - panic(errors.Errorf("unhandled package type: %v", pkgType)) + panic(fmt.Errorf("unhandled package type: %v", pkgType)) } } } diff --git a/testing/environments/latest.yml b/testing/environments/latest.yml index 94fc107a289..d0d62c5cab6 100644 --- a/testing/environments/latest.yml +++ b/testing/environments/latest.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.7.0 + image: docker.elastic.co/elasticsearch/elasticsearch:8.8.1 healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -19,7 +19,7 @@ services: - "script.context.template.cache_max_size=2000" logstash: - image: docker.elastic.co/logstash/logstash:8.7.0 + image: docker.elastic.co/logstash/logstash:8.8.1 healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 300 @@ -29,7 +29,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.7.0 + image: docker.elastic.co/kibana/kibana:8.8.1 healthcheck: test: ["CMD", "curl", "-f", "http://localhost:5601"] retries: 300 diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 4595b6fc1b0..8ad95b3362e 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.9.0-9ee1e6fb-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.10.0-ed2e01b4-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -31,7 +31,7 @@ services: - "./docker/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles" logstash: - image: docker.elastic.co/logstash/logstash:8.9.0-9ee1e6fb-SNAPSHOT + image: docker.elastic.co/logstash/logstash:8.10.0-ed2e01b4-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -44,7 +44,7 @@ services: - 5055:5055 kibana: - image: docker.elastic.co/kibana/kibana:8.9.0-9ee1e6fb-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.10.0-ed2e01b4-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" diff --git a/winlogbeat/beater/winlogbeat.go b/winlogbeat/beater/winlogbeat.go index 89e2144d4c3..78e7f24ff0b 100644 --- a/winlogbeat/beater/winlogbeat.go +++ b/winlogbeat/beater/winlogbeat.go @@ -162,6 +162,18 @@ func (eb *Winlogbeat) Run(b *beat.Beat) error { } } + if b.Manager != nil { + b.Manager.RegisterDiagnosticHook("input_metrics", "Metrics from active inputs.", + "input_metrics.json", "application/json", func() []byte { + data, err := inputmon.MetricSnapshotJSON() + if err != nil { + logp.L().Warnw("Failed to collect input metric snapshot for Agent diagnostics.", "error", err) + return []byte(err.Error()) + } + return data + }) + } + var wg sync.WaitGroup for _, log := range eb.eventLogs { state := persistedState[log.source.Name()] diff --git a/winlogbeat/docs/troubleshooting.asciidoc b/winlogbeat/docs/troubleshooting.asciidoc index bc9f7dddacc..e14d6802c2a 100644 --- a/winlogbeat/docs/troubleshooting.asciidoc +++ b/winlogbeat/docs/troubleshooting.asciidoc @@ -7,6 +7,7 @@ If you have issues 
installing or running Winlogbeat, read the following tips: * <> * <> +* <> * <> //sets block macro for getting-help.asciidoc included in next section @@ -25,3 +26,15 @@ include::{libbeat-dir}/getting-help.asciidoc[] == Debug include::{libbeat-dir}/debugging.asciidoc[] + +//sets block macro for metrics-in-logs.asciidoc included in next section + +[id="understand-{beatname_lc}-logs"] +[role="xpack"] +== Understand metrics in {beatname_uc} logs + +++++ +Understand logged metrics +++++ + +include::{libbeat-dir}/metrics-in-logs.asciidoc[] \ No newline at end of file diff --git a/winlogbeat/scripts/mage/docs.go b/winlogbeat/scripts/mage/docs.go index 0b59d3e79b1..510ae7558ec 100644 --- a/winlogbeat/scripts/mage/docs.go +++ b/winlogbeat/scripts/mage/docs.go @@ -25,8 +25,6 @@ import ( "regexp" "strings" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/dev-tools/mage" ) @@ -58,7 +56,7 @@ func moduleDocs() error { return err } if len(files) == 0 { - return errors.Errorf("No modules found matching %v", searchPath) + return fmt.Errorf("No modules found matching %v", searchPath) } // Clean existing files. @@ -71,7 +69,7 @@ func moduleDocs() error { for _, f := range files { matches := moduleNameRegex.FindStringSubmatch(filepath.ToSlash(f)) if len(matches) != 2 { - return errors.Errorf("module path %v does not match regexp", f) + return fmt.Errorf("module path %v does not match regexp", f) } name := matches[1] names = append(names, name) diff --git a/winlogbeat/scripts/mage/package.go b/winlogbeat/scripts/mage/package.go index 81e090092bd..20cde9f551b 100644 --- a/winlogbeat/scripts/mage/package.go +++ b/winlogbeat/scripts/mage/package.go @@ -23,7 +23,6 @@ import ( "time" "github.com/magefile/mage/mg" - "github.com/pkg/errors" devtools "github.com/elastic/beats/v7/dev-tools/mage" "github.com/elastic/beats/v7/dev-tools/mage/target/build" @@ -86,7 +85,7 @@ func customizePackaging() { case devtools.Deb, devtools.RPM: args.Spec.Files["/etc/{{.BeatName}}/module"] = moduleDir default: - panic(errors.Errorf("unhandled package type: %v", pkgType)) + panic(fmt.Errorf("unhandled package type: %v", pkgType)) } } } diff --git a/winlogbeat/winlogbeat.reference.yml b/winlogbeat/winlogbeat.reference.yml index 941fda11491..64377c0fc6d 100644 --- a/winlogbeat/winlogbeat.reference.yml +++ b/winlogbeat/winlogbeat.reference.yml @@ -58,10 +58,10 @@ winlogbeat.event_logs: # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. +# If this option is not defined, the hostname is used. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their own `tags` field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] @@ -73,7 +73,7 @@ winlogbeat.event_logs: # env: staging # If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields +# fields in the output document instead of being grouped under a `fields` # sub-dictionary. Default is false.
#fields_under_root: false @@ -85,7 +85,7 @@ winlogbeat.event_logs: #queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs - # bulk_max_size) to the output, the moment the output is ready to server + # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events. #mem: # Max number of events the queue can buffer. @@ -137,7 +137,7 @@ winlogbeat.event_logs: # length of its retry interval each time, up to this maximum. #max_retry_interval: 30s -# Sets the maximum number of CPUs that can be executing simultaneously. The +# Sets the maximum number of CPUs that can execute simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: @@ -258,7 +258,7 @@ winlogbeat.event_logs: # ignore_missing: false # fail_on_error: true # -# The following example copies the value of message to message_copied +# The following example copies the value of the message field to message_copied # #processors: # - copy_fields: @@ -268,7 +268,7 @@ winlogbeat.event_logs: # fail_on_error: true # ignore_missing: false # -# The following example truncates the value of message to 1024 bytes +# The following example truncates the value of the message field to 1024 bytes # #processors: # - truncate_fields: @@ -365,7 +365,7 @@ output.elasticsearch: # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. #index: "winlogbeat-%{[agent.version]}" - # Optional ingest pipeline. By default no pipeline will be used. + # Optional ingest pipeline. By default, no pipeline will be used. #pipeline: "" # Optional HTTP path @@ -1096,14 +1096,14 @@ output.elasticsearch: # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag or the `setup` command. +# options here or by using the `-setup` CLI flag or the `setup` command. #setup.dashboards.enabled: false # The directory from where to read the dashboards. The default is the `kibana` # folder in the home path. #setup.dashboards.directory: ${path.home}/kibana -# The URL from where to download the dashboards archive. It is used instead of +# The URL from where to download the dashboard archive. It is used instead of # the directory if it has a value. #setup.dashboards.url: @@ -1200,7 +1200,7 @@ setup.template.settings: # Configure index lifecycle management (ILM) to manage the backing indices # of your data streams. -# Enable ILM support. Valid values are true, false. +# Enable ILM support. Valid values are true or false. #setup.ilm.enabled: true # Set the lifecycle policy name. The default policy name is @@ -1355,25 +1355,25 @@ logging.files: # The name of the files where the logs are written to. #name: winlogbeat - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated + # Configure log file size limit. If the limit is reached, the log file will be + # automatically rotated. #rotateeverybytes: 10485760 # = 10MB - # Number of rotated log files to keep. Oldest files will be deleted first. + # Number of rotated log files to keep. The oldest files will be deleted first. #keepfiles: 7 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation.
#permissions: 0600 - # Enable log file rotation on time intervals in addition to size-based rotation. + # Enable log file rotation on time intervals in addition to the size-based rotation. # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h # are boundary-aligned with minutes, hours, days, weeks, months, and years as # reported by the local system clock. All other intervals are calculated from the # Unix epoch. Defaults to disabled. #interval: 0 - # Rotate existing logs on startup rather than appending to the existing + # Rotate existing logs on startup rather than appending them to the existing # file. Defaults to true. # rotateonstartup: true @@ -1401,7 +1401,7 @@ logging.files: # Array of hosts to connect to. # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # In case you specify an additional path, the scheme is required: http://localhost:9200/path # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 #hosts: ["localhost:9200"] @@ -1448,7 +1448,7 @@ logging.files: # Elasticsearch after a network error. The default is 60s. #backoff.max: 60s - # Configure HTTP request timeout before failing an request to Elasticsearch. + # Configure HTTP request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -1545,15 +1545,15 @@ logging.files: # =============================== HTTP Endpoint ================================ -# Each beat can expose internal metrics through a HTTP endpoint. For security +# Each beat can expose internal metrics through an HTTP endpoint. For security # reasons the endpoint is disabled by default. This feature is currently experimental. -# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output # append ?pretty to the URL. # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe. # When using IP addresses, it is recommended to only use localhost. #http.host: localhost @@ -1563,7 +1563,7 @@ logging.files: # Define which user should be owning the named pipe. #http.named_pipe.user: -# Define which the permissions that should be applied to the named pipe, use the Security +# Define which permissions should be applied to the named pipe, use the Security # Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with # `http.user`. #http.named_pipe.security_descriptor: diff --git a/winlogbeat/winlogbeat.yml b/winlogbeat/winlogbeat.yml index bdb1f706fa6..a4c24aa88e0 100644 --- a/winlogbeat/winlogbeat.yml +++ b/winlogbeat/winlogbeat.yml @@ -51,7 +51,7 @@ setup.template.settings: # all the transactions sent by a single shipper in the web interface. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their own `tags` field with each # transaction published. #tags: ["service-X", "web-tier"] @@ -66,8 +66,8 @@ setup.template.settings: # options here or by using the `setup` command. #setup.dashboards.enabled: false -# The URL from where to download the dashboards archive. By default this URL -has a value which is computed based on the Beat name and version.
For released +# The URL from where to download the dashboard archive. By default, this URL +# has a value that is computed based on the Beat name and version. For released # versions, this URL points to the dashboard archive on the artifacts.elastic.co # website. #setup.dashboards.url: @@ -147,7 +147,7 @@ processors: #logging.level: debug # At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", +# To enable all selectors, use ["*"]. Examples of other selectors are "beat", # "publisher", "service". #logging.selectors: ["*"] @@ -165,7 +165,7 @@ processors: #monitoring.cluster_uuid: # Uncomment to send the metrics to Elasticsearch. Most settings from the -# Elasticsearch output are accepted here as well. +# Elasticsearch output are accepted here as well. # Note that the settings should point to your Elasticsearch *monitoring* cluster. # Any setting that is not set is automatically inherited from the Elasticsearch # output configuration, so if you have the Elasticsearch output configured such diff --git a/x-pack/auditbeat/auditbeat.reference.yml b/x-pack/auditbeat/auditbeat.reference.yml index 5a5cc04871b..e9ecd33ae39 100644 --- a/x-pack/auditbeat/auditbeat.reference.yml +++ b/x-pack/auditbeat/auditbeat.reference.yml @@ -9,7 +9,7 @@ # ============================== Config Reloading ============================== -# Config reloading allows to dynamically load modules. Each file which is +# Config reloading allows you to dynamically load modules. Each file that is # monitored must contain one or multiple modules as a list. auditbeat.config.modules: @@ -232,10 +232,10 @@ auditbeat.modules: # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. +# If this option is not defined, the hostname is used. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their own `tags` field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] @@ -247,7 +247,7 @@ auditbeat.modules: # env: staging # If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields +# fields in the output document instead of being grouped under a `fields` # sub-dictionary. Default is false. #fields_under_root: false @@ -259,7 +259,7 @@ auditbeat.modules: #queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs - # bulk_max_size) to the output, the moment the output is ready to server + # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events. #mem: # Max number of events the queue can buffer. @@ -311,7 +311,7 @@ auditbeat.modules: # length of its retry interval each time, up to this maximum. #max_retry_interval: 30s -# Sets the maximum number of CPUs that can be executing simultaneously. The +# Sets the maximum number of CPUs that can execute simultaneously. The # default is the number of logical CPUs available in the system.
#max_procs: @@ -432,7 +432,7 @@ auditbeat.modules: # ignore_missing: false # fail_on_error: true # -# The following example copies the value of message to message_copied +# The following example copies the value of the message field to message_copied # #processors: # - copy_fields: @@ -442,7 +442,7 @@ auditbeat.modules: # fail_on_error: true # ignore_missing: false # -# The following example truncates the value of message to 1024 bytes +# The following example truncates the value of the message field to 1024 bytes # #processors: # - truncate_fields: @@ -539,7 +539,7 @@ output.elasticsearch: # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. #index: "auditbeat-%{[agent.version]}" - # Optional ingest pipeline. By default no pipeline will be used. + # Optional ingest pipeline. By default, no pipeline will be used. #pipeline: "" # Optional HTTP path @@ -1270,14 +1270,14 @@ output.elasticsearch: # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag or the `setup` command. +# options here or by using the `-setup` CLI flag or the `setup` command. #setup.dashboards.enabled: false # The directory from where to read the dashboards. The default is the `kibana` # folder in the home path. #setup.dashboards.directory: ${path.home}/kibana -# The URL from where to download the dashboards archive. It is used instead of +# The URL from where to download the dashboard archive. It is used instead of # the directory if it has a value. #setup.dashboards.url: @@ -1374,7 +1374,7 @@ setup.template.settings: # Configure index lifecycle management (ILM) to manage the backing indices # of your data streams. -# Enable ILM support. Valid values are true, false. +# Enable ILM support. Valid values are true or false. #setup.ilm.enabled: true # Set the lifecycle policy name. The default policy name is @@ -1529,25 +1529,25 @@ logging.files: # The name of the files where the logs are written to. #name: auditbeat - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated + # Configure log file size limit. If the limit is reached, the log file will be + # automatically rotated. #rotateeverybytes: 10485760 # = 10MB - # Number of rotated log files to keep. Oldest files will be deleted first. + # Number of rotated log files to keep. The oldest files will be deleted first. #keepfiles: 7 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. #permissions: 0600 - # Enable log file rotation on time intervals in addition to size-based rotation. + # Enable log file rotation on time intervals in addition to the size-based rotation. # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h # are boundary-aligned with minutes, hours, days, weeks, months, and years as # reported by the local system clock. All other intervals are calculated from the # Unix epoch. Defaults to disabled. #interval: 0 - # Rotate existing logs on startup rather than appending to the existing + # Rotate existing logs on startup rather than appending them to the existing # file. Defaults to true. # rotateonstartup: true @@ -1575,7 +1575,7 @@ logging.files: # Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # In case you specify an additional path, the scheme is required: http://localhost:9200/path # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 #hosts: ["localhost:9200"] @@ -1622,7 +1622,7 @@ logging.files: # Elasticsearch after a network error. The default is 60s. #backoff.max: 60s - # Configure HTTP request timeout before failing an request to Elasticsearch. + # Configure HTTP request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -1719,15 +1719,15 @@ logging.files: # =============================== HTTP Endpoint ================================ -# Each beat can expose internal metrics through a HTTP endpoint. For security +# Each beat can expose internal metrics through an HTTP endpoint. For security # reasons the endpoint is disabled by default. This feature is currently experimental. -# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output # append ?pretty to the URL. # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe. # When using IP addresses, it is recommended to only use localhost. #http.host: localhost @@ -1737,7 +1737,7 @@ logging.files: # Define which user should be owning the named pipe. #http.named_pipe.user: -# Define which the permissions that should be applied to the named pipe, use the Security +# Define which permissions should be applied to the named pipe; use the Security # Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with # `http.user`. #http.named_pipe.security_descriptor: diff --git a/x-pack/auditbeat/auditbeat.yml b/x-pack/auditbeat/auditbeat.yml index e3285427746..c84640539dc 100644 --- a/x-pack/auditbeat/auditbeat.yml +++ b/x-pack/auditbeat/auditbeat.yml @@ -88,7 +88,7 @@ setup.template.settings: # all the transactions sent by a single shipper in the web interface. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in a dedicated field with each # transaction published. #tags: ["service-X", "web-tier"] @@ -103,8 +103,8 @@ setup.template.settings: # options here or by using the `setup` command. #setup.dashboards.enabled: false -# The URL from where to download the dashboards archive. By default this URL -# has a value which is computed based on the Beat name and version. For released +# The URL from where to download the dashboard archive. By default, this URL +# has a value that is computed based on the Beat name and version. For released # versions, this URL points to the dashboard archive on the artifacts.elastic.co # website. #setup.dashboards.url: @@ -188,7 +188,7 @@ processors: #logging.level: debug # At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", +# To enable all selectors, use ["*"]. Examples of other selectors are "beat", # "publisher", "service". #logging.selectors: ["*"] @@ -206,7 +206,7 @@ processors: #monitoring.cluster_uuid: # Uncomment to send the metrics to Elasticsearch.
Most settings from the -# Elasticsearch output are accepted here as well. +# Elasticsearch output are also accepted here. # Note that the settings should point to your Elasticsearch *monitoring* cluster. # Any setting that is not set is automatically inherited from the Elasticsearch # output configuration, so if you have the Elasticsearch output configured such diff --git a/x-pack/dockerlogbeat/handlers.go b/x-pack/dockerlogbeat/handlers.go index 8b3a771a741..31466d44b65 100644 --- a/x-pack/dockerlogbeat/handlers.go +++ b/x-pack/dockerlogbeat/handlers.go @@ -6,6 +6,7 @@ package main import ( "encoding/json" + "fmt" "io" "net/http" @@ -14,7 +15,6 @@ import ( "github.com/elastic/beats/v7/x-pack/dockerlogbeat/pipelinemanager" "github.com/docker/docker/pkg/ioutils" - "github.com/pkg/errors" ) // StartLoggingRequest represents the request object we get on a call to //LogDriver.StartLogging @@ -54,7 +54,7 @@ func startLoggingHandler(pm *pipelinemanager.PipelineManager) func(w http.Respon var startReq StartLoggingRequest err := json.NewDecoder(r.Body).Decode(&startReq) if err != nil { - http.Error(w, errors.Wrap(err, "error decoding json request").Error(), http.StatusBadRequest) + http.Error(w, fmt.Sprintf("error decoding json request: %v", err), http.StatusBadRequest) return } @@ -64,13 +64,13 @@ func startLoggingHandler(pm *pipelinemanager.PipelineManager) func(w http.Respon cfg, err := pipelinemanager.NewCfgFromRaw(startReq.Info.Config) if err != nil { - http.Error(w, errors.Wrap(err, "error creating client config").Error(), http.StatusBadRequest) + http.Error(w, fmt.Sprintf("error creating client config: %v", err), http.StatusBadRequest) return } pm.Logger.Debugf("Got config: %#v", cfg) cl, err := pm.CreateClientWithConfig(cfg, startReq.Info, startReq.File) if err != nil { - http.Error(w, errors.Wrap(err, "error creating client").Error(), http.StatusBadRequest) + http.Error(w, fmt.Sprintf("error creating client: %v", err), http.StatusBadRequest) return } @@ -86,7 +86,7 @@ func stopLoggingHandler(pm *pipelinemanager.PipelineManager) func(w http.Respons var stopReq StopLoggingRequest err := json.NewDecoder(r.Body).Decode(&stopReq) if err != nil { - http.Error(w, errors.Wrap(err, "error decoding json request").Error(), http.StatusBadRequest) + http.Error(w, fmt.Sprintf("error decoding json request: %v", err), http.StatusBadRequest) return } pm.Logger.Debugf("Got stop request object %#v\n", stopReq) @@ -108,14 +108,14 @@ func readLogHandler(pm *pipelinemanager.PipelineManager) func(w http.ResponseWri var logReq logsRequest err := json.NewDecoder(r.Body).Decode(&logReq) if err != nil { - http.Error(w, errors.Wrap(err, "error decoding json request").Error(), http.StatusBadRequest) + http.Error(w, fmt.Sprintf("error decoding json request: %v", err), http.StatusBadRequest) return } pm.Logger.Debugf("Got logging request for container %s\n", logReq.Info.ContainerName) stream, err := pm.CreateReaderForContainer(logReq.Info, logReq.Config) if err != nil { - http.Error(w, errors.Wrap(err, "error creating log reader").Error(), http.StatusBadRequest) + http.Error(w, fmt.Sprintf("error creating log reader: %v", err), http.StatusBadRequest) return } defer stream.Close() diff --git a/x-pack/dockerlogbeat/magefile.go b/x-pack/dockerlogbeat/magefile.go index 3b848ce92c0..73da67186d7 100644 --- a/x-pack/dockerlogbeat/magefile.go +++ b/x-pack/dockerlogbeat/magefile.go @@ -24,7 +24,6 @@ import ( "github.com/docker/docker/client" "github.com/magefile/mage/mg"
"github.com/magefile/mage/sh" - "github.com/pkg/errors" devtools "github.com/elastic/beats/v7/dev-tools/mage" // mage:import @@ -71,7 +70,7 @@ func init() { func getPluginName() (string, error) { version, err := devtools.BeatQualifiedVersion() if err != nil { - return "", errors.Wrap(err, "error getting beats version") + return "", fmt.Errorf("error getting beats version: %w", err) } return dockerPluginName + ":" + version, nil } @@ -80,35 +79,35 @@ func getPluginName() (string, error) { func createContainer(ctx context.Context, cli *client.Client, arch string) error { dockerLogBeatDir, err := os.Getwd() if err != nil { - return errors.Wrap(err, "error getting work dir") + return fmt.Errorf("error getting work dir: %w", err) } if !strings.Contains(dockerLogBeatDir, "dockerlogbeat") { - return errors.Errorf("not in dockerlogbeat directory: %s", dockerLogBeatDir) + return fmt.Errorf("not in dockerlogbeat directory: %s", dockerLogBeatDir) } dockerfile := filepath.Join(packageStagingDir, "Dockerfile") err = devtools.ExpandFile(dockerfileTmpl, dockerfile, platformMap[arch]) if err != nil { - return errors.Wrap(err, "error while expanding Dockerfile template") + return fmt.Errorf("error while expanding Dockerfile template: %w", err) } // start to build the root container that'll be used to build the plugin tmpDir, err := ioutil.TempDir("", "dockerBuildTar") if err != nil { - return errors.Wrap(err, "error locating temp dir") + return fmt.Errorf("error locating temp dir: %w", err) } defer sh.Rm(tmpDir) tarPath := filepath.Join(tmpDir, "tarRoot.tar") err = sh.RunV("tar", "cf", tarPath, "./") if err != nil { - return errors.Wrap(err, "error creating tar") + return fmt.Errorf("error creating tar: %w", err) } buildContext, err := os.Open(tarPath) if err != nil { - return errors.Wrap(err, "error opening temp dur") + return fmt.Errorf("error opening temp dur: %w", err) } defer buildContext.Close() @@ -119,13 +118,13 @@ func createContainer(ctx context.Context, cli *client.Client, arch string) error // build, wait for output buildResp, err := cli.ImageBuild(ctx, buildContext, buildOpts) if err != nil { - return errors.Wrap(err, "error building final container image") + return fmt.Errorf("error building final container image: %w", err) } defer buildResp.Body.Close() // This blocks until the build operation completes buildStr, errBufRead := ioutil.ReadAll(buildResp.Body) if errBufRead != nil { - return errors.Wrap(err, "error reading from docker output") + return fmt.Errorf("error reading from docker output: %w", errBufRead) } fmt.Printf("%s\n", string(buildStr)) @@ -143,14 +142,14 @@ func BuildContainer(ctx context.Context) error { // setup cli, err := newDockerClient(ctx) if err != nil { - return errors.Wrap(err, "error creating docker client") + return fmt.Errorf("error creating docker client: %w", err) } devtools.CreateDir(packageStagingDir) devtools.CreateDir(packageEndDir) err = os.MkdirAll(filepath.Join(buildDir, "rootfs"), 0755) if err != nil { - return errors.Wrap(err, "error creating build dir") + return fmt.Errorf("error creating build dir: %w", err) } for _, plat := range devtools.Platforms { @@ -162,13 +161,13 @@ func BuildContainer(ctx context.Context) error { err = createContainer(ctx, cli, arch) if err != nil { - return errors.Wrap(err, "error creating base container") + return fmt.Errorf("error creating base container: %w", err) } // create the container that will become our rootfs CreatedContainerBody, err := cli.ContainerCreate(ctx, &container.Config{Image: rootImageName}, nil, nil, 
nil, "") if err != nil { - return errors.Wrap(err, "error creating container") + return fmt.Errorf("error creating container: %w", err) } defer func() { @@ -185,31 +184,31 @@ func BuildContainer(ctx context.Context) error { file, err := os.Create(dockerExportPath) if err != nil { - return errors.Wrap(err, "error creating tar archive") + return fmt.Errorf("error creating tar archive: %w", err) } // export the container to a tar file exportReader, err := cli.ContainerExport(ctx, CreatedContainerBody.ID) if err != nil { - return errors.Wrap(err, "error exporting container") + return fmt.Errorf("error exporting container: %w", err) } _, err = io.Copy(file, exportReader) if err != nil { - return errors.Wrap(err, "error writing exported container") + return fmt.Errorf("error writing exported container: %w", err) } // misc prepare operations err = devtools.Copy("config.json", filepath.Join(buildDir, "config.json")) if err != nil { - return errors.Wrap(err, "error copying config.json") + return fmt.Errorf("error copying config.json: %w", err) } // unpack the tar file into a root directory, which is the format needed for the docker plugin create tool err = sh.RunV("tar", "-xf", dockerExportPath, "-C", filepath.Join(buildDir, "rootfs")) if err != nil { - return errors.Wrap(err, "error unpacking exported container") + return fmt.Errorf("error unpacking exported container: %w", err) } } @@ -220,12 +219,12 @@ func cleanDockerArtifacts(ctx context.Context, containerID string, cli *client.C fmt.Printf("Removing container %s\n", containerID) err := cli.ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true, Force: true}) if err != nil { - return errors.Wrap(err, "error removing container") + return fmt.Errorf("error removing container: %w", err) } resp, err := cli.ImageRemove(ctx, rootImageName, types.ImageRemoveOptions{Force: true}) if err != nil { - return errors.Wrap(err, "error removing image") + return fmt.Errorf("error removing image: %w", err) } fmt.Printf("Removed image: %#v\n", resp) return nil @@ -235,13 +234,13 @@ func cleanDockerArtifacts(ctx context.Context, containerID string, cli *client.C func Uninstall(ctx context.Context) error { cli, err := newDockerClient(ctx) if err != nil { - return errors.Wrap(err, "error creating docker client") + return fmt.Errorf("error creating docker client: %w", err) } // check to see if we have a plugin we need to remove plugins, err := cli.PluginList(ctx, filters.Args{}) if err != nil { - return errors.Wrap(err, "error getting list of plugins") + return fmt.Errorf("error getting list of plugins: %w", err) } toRemoveName := "" @@ -257,11 +256,11 @@ func Uninstall(ctx context.Context) error { err = cli.PluginDisable(ctx, toRemoveName, types.PluginDisableOptions{Force: true}) if err != nil { - return errors.Wrap(err, "error disabling plugin") + return fmt.Errorf("error disabling plugin: %w", err) } err = cli.PluginRemove(ctx, toRemoveName, types.PluginRemoveOptions{Force: true}) if err != nil { - return errors.Wrap(err, "error removing plugin") + return fmt.Errorf("error removing plugin: %w", err) } return nil @@ -281,22 +280,22 @@ func Install(ctx context.Context) error { cli, err := newDockerClient(ctx) if err != nil { - return errors.Wrap(err, "error creating docker client") + return fmt.Errorf("error creating docker client: %w", err) } archive, err := tar(buildDir, "rootfs", "config.json") if err != nil { - return errors.Wrap(err, "error creating archive of work dir") + return fmt.Errorf("error creating archive of work dir: %w", 
err) } err = cli.PluginCreate(ctx, archive, types.PluginCreateOptions{RepoName: name}) if err != nil { - return errors.Wrap(err, "error creating plugin") + return fmt.Errorf("error creating plugin: %w", err) } err = cli.PluginEnable(ctx, name, types.PluginEnableOptions{}) if err != nil { - return errors.Wrap(err, "error enabling plugin") + return fmt.Errorf("error enabling plugin: %w", err) } return nil @@ -308,7 +307,7 @@ func tar(dir string, files ...string) (io.Reader, error) { args := append([]string{"-C", dir, "-cf", "-"}, files...) _, err := sh.Exec(nil, &archive, &stdErr, "tar", args...) if err != nil { - return nil, errors.Wrap(err, stdErr.String()) + return nil, fmt.Errorf("%s: %w", stdErr.String(), err) } return &archive, nil @@ -318,7 +317,7 @@ func tar(dir string, files ...string) (io.Reader, error) { func Export() error { version, err := devtools.BeatQualifiedVersion() if err != nil { - return errors.Wrap(err, "error getting beats version") + return fmt.Errorf("error getting beats version: %w", err) } if devtools.Snapshot { @@ -333,16 +332,20 @@ func Export() error { err = os.Chdir(packageStagingDir) if err != nil { - return errors.Wrap(err, "error changing directory") + return fmt.Errorf("error changing directory: %w", err) } err = sh.RunV("tar", "zcf", outpath, filepath.Join(logDriverName, "rootfs"), filepath.Join(logDriverName, "config.json")) if err != nil { - return errors.Wrap(err, "error creating release tarball") + return fmt.Errorf("error creating release tarball: %w", err) } - return errors.Wrap(devtools.CreateSHA512File(outpath), "failed to create .sha512 file") + + if err = devtools.CreateSHA512File(outpath); err != nil { + return fmt.Errorf("failed to create .sha512 file: %w", err) + } + return nil } return nil diff --git a/x-pack/dockerlogbeat/pipelinemanager/config.go b/x-pack/dockerlogbeat/pipelinemanager/config.go index 46a10a7fe6b..b6bbf3a26fe 100644 --- a/x-pack/dockerlogbeat/pipelinemanager/config.go +++ b/x-pack/dockerlogbeat/pipelinemanager/config.go @@ -5,10 +5,10 @@ package pipelinemanager import ( + "errors" + "fmt" "strings" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/common/transform/typeconv" "github.com/elastic/elastic-agent-libs/config" ) @@ -66,11 +66,11 @@ func (cfg ContainerOutputConfig) CreateConfig() (*config.C, error) { var tmp map[string]interface{} err := typeconv.Convert(&tmp, cfg) if err != nil { - return nil, errors.Wrap(err, "error converting config struct to interface") + return nil, fmt.Errorf("error converting config struct to interface: %w", err) } cfgFinal, err := config.NewConfigFrom(tmp) if err != nil { - return nil, errors.Wrap(err, "error creating config object") + return nil, fmt.Errorf("error creating config object: %w", err) } return cfgFinal, nil diff --git a/x-pack/dockerlogbeat/pipelinemanager/pipelineManager.go b/x-pack/dockerlogbeat/pipelinemanager/pipelineManager.go index 9115027e110..1e0179d2fa3 100644 --- a/x-pack/dockerlogbeat/pipelinemanager/pipelineManager.go +++ b/x-pack/dockerlogbeat/pipelinemanager/pipelineManager.go @@ -17,8 +17,6 @@ import ( "github.com/elastic/beats/v7/x-pack/dockerlogbeat/pipereader" "github.com/elastic/elastic-agent-libs/config" - "github.com/pkg/errors" - "github.com/docker/docker/api/types/plugins/logdriver" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/jsonfilelog" @@ -77,7 +75,7 @@ func (pm *PipelineManager) CloseClientWithFile(file string) error { cl, err := pm.removeClient(file) if err != nil { - return errors.Wrap(err, "Error
removing client") + return fmt.Errorf("Error removing client: %w", err) } hash := cl.pipelineHash @@ -88,7 +86,7 @@ func (pm *PipelineManager) CloseClientWithFile(file string) error { pm.Logger.Debugf("Closing Client first from pipelineManager") err = cl.Close() if err != nil { - return errors.Wrap(err, "error closing client") + return fmt.Errorf("error closing client: %w", err) } // if the pipeline is no longer in use, clean up @@ -103,16 +101,16 @@ func (pm *PipelineManager) CreateClientWithConfig(containerConfig ContainerOutpu hashstring, err := hashstructure.Hash(containerConfig, nil) if err != nil { - return nil, errors.Wrap(err, "error creating config hash") + return nil, fmt.Errorf("error creating config hash: %w", err) } pipeline, err := pm.getOrCreatePipeline(containerConfig, hashstring) if err != nil { - return nil, errors.Wrap(err, "error getting pipeline") + return nil, fmt.Errorf("error getting pipeline: %w", err) } reader, err := pipereader.NewReaderFromPath(file) if err != nil { - return nil, errors.Wrap(err, "error creating reader for docker log stream") + return nil, fmt.Errorf("error creating reader for docker log stream: %w", err) } // Why is this empty by default? What should be here? Who knows! @@ -121,7 +119,7 @@ func (pm *PipelineManager) CreateClientWithConfig(containerConfig ContainerOutpu } err = os.MkdirAll(filepath.Dir(info.LogPath), 0755) if err != nil { - return nil, errors.Wrap(err, "error creating directory for local logs") + return nil, fmt.Errorf("error creating directory for local logs: %w", err) } // set a default log size if _, ok := info.Config["max-size"]; !ok { @@ -134,13 +132,13 @@ func (pm *PipelineManager) CreateClientWithConfig(containerConfig ContainerOutpu localLog, err := jsonfilelog.New(info) if err != nil { - return nil, errors.Wrap(err, "error creating local log") + return nil, fmt.Errorf("error creating local log: %w", err) } //actually get to crafting the new client. 
cl, err := newClientFromPipeline(pipeline.pipeline, reader, hashstring, info, localLog, pm.hostname) if err != nil { - return nil, errors.Wrap(err, "error creating client") + return nil, fmt.Errorf("error creating client: %w", err) } pm.registerClient(cl, hashstring, file) @@ -211,7 +209,7 @@ func (pm *PipelineManager) getOrCreatePipeline(logOptsConfig ContainerOutputConf if !test { pipeline, err = loadNewPipeline(logOptsConfig, pm.hostname, pm.Logger) if err != nil { - return nil, errors.Wrap(err, "error loading pipeline") + return nil, fmt.Errorf("error loading pipeline: %w", err) } pm.pipelines[hash] = pipeline } diff --git a/x-pack/dockerlogbeat/pipelinemanager/selector.go b/x-pack/dockerlogbeat/pipelinemanager/selector.go index 624e8528b09..637837a35b1 100644 --- a/x-pack/dockerlogbeat/pipelinemanager/selector.go +++ b/x-pack/dockerlogbeat/pipelinemanager/selector.go @@ -7,8 +7,6 @@ package pipelinemanager import ( "fmt" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/outputs" "github.com/elastic/beats/v7/libbeat/outputs/outil" @@ -36,7 +34,7 @@ func (s *IdxSupport) BuildSelector(cfg *config.C) (outputs.IndexSelector, error) if cfg.HasField("indicies") { sub, err := cfg.Child("indices", -1) if err != nil { - return nil, errors.Wrap(err, "error getting indicies field") + return nil, fmt.Errorf("error getting indicies field: %w", err) } bsCfg.SetChild("indices", -1, sub) } @@ -64,7 +62,7 @@ func (s *IdxSupport) BuildSelector(cfg *config.C) (outputs.IndexSelector, error) indexSel, err := outil.BuildSelectorFromConfig(bsCfg, buildSettings) if err != nil { - return nil, errors.Wrap(err, "error creating build Selector") + return nil, fmt.Errorf("error creating build Selector: %w", err) } return indexSel, nil diff --git a/x-pack/dockerlogbeat/pipereader/reader.go b/x-pack/dockerlogbeat/pipereader/reader.go index d1f8eb05c21..6ca20f301b3 100644 --- a/x-pack/dockerlogbeat/pipereader/reader.go +++ b/x-pack/dockerlogbeat/pipereader/reader.go @@ -7,6 +7,7 @@ package pipereader import ( "context" "encoding/binary" + "fmt" "io" "io/ioutil" "syscall" @@ -14,7 +15,6 @@ import ( "github.com/containerd/fifo" "github.com/docker/docker/api/types/plugins/logdriver" "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" ) // PipeReader reads from the FIFO pipe we get from the docker container @@ -30,7 +30,7 @@ type PipeReader struct { func NewReaderFromPath(file string) (*PipeReader, error) { inputFile, err := fifo.OpenFifo(context.Background(), file, syscall.O_RDONLY, 0700) if err != nil { - return nil, errors.Wrapf(err, "error opening logger fifo: %q", file) + return nil, fmt.Errorf("error opening logger fifo: %q: %w", file, err) } return &PipeReader{fifoPipe: inputFile, byteOrder: binary.BigEndian, lenFrameBuf: make([]byte, 4), bodyBuf: nil, maxSize: 2e6}, nil @@ -63,7 +63,7 @@ func (reader *PipeReader) ReadMessage(log *logdriver.LogEntry) error { // 2) we have a too-large message. 
Disregard length bytes _, err = io.CopyBuffer(ioutil.Discard, io.LimitReader(reader.fifoPipe, int64(lenFrame)), reader.bodyBuf) if err != nil { - return errors.Wrap(err, "error emptying buffer") + return fmt.Errorf("error emptying buffer: %w", err) } } @@ -71,7 +71,7 @@ func (reader *PipeReader) ReadMessage(log *logdriver.LogEntry) error { readBuf := reader.setBuffer(lenFrame) _, err = io.ReadFull(reader.fifoPipe, readBuf[:lenFrame]) if err != nil { - return errors.Wrap(err, "error reading buffer") + return fmt.Errorf("error reading buffer: %w", err) } return proto.Unmarshal(readBuf[:lenFrame], log) diff --git a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc index 1df7f4bb341..794a51de081 100644 --- a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc @@ -797,7 +797,7 @@ observe the activity of the input. | `sqs_messages_inflight_gauge` | Number of SQS messages inflight (gauge). | `sqs_messages_returned_total` | Number of SQS message returned to queue (happens on errors implicitly after visibility timeout passes). | `sqs_messages_deleted_total` | Number of SQS messages deleted. -| `sqs_messages_waiting_gauge` | Number of SQS messages waiting in the SQS queue (gauge). The value is refreshed every minute via data from GetQueueAttributes. +| `sqs_messages_waiting_gauge` | Number of SQS messages waiting in the SQS queue (gauge). The value is refreshed every minute via data from https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_GetQueueAttributes.html. A value of `-1` indicates the metric is uninitialized or could not be collected due to an error. | `sqs_worker_utilization` | Rate of SQS worker utilization over previous 5 seconds. 0 indicates idle, 1 indicates all workers utilized. | `sqs_message_processing_time` | Histogram of the elapsed SQS processing times in nanoseconds (time of receipt to time of delete/return). | `sqs_lag_time` | Histogram of the difference between the SQS SentTimestamp attribute and the time when the SQS message was received expressed in nanoseconds. diff --git a/x-pack/filebeat/docs/inputs/input-cel.asciidoc index b4772fe3681..2643714ef7d 100644 --- a/x-pack/filebeat/docs/inputs/input-cel.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-cel.asciidoc @@ -1,6 +1,8 @@ [role="xpack"] :type: cel +:mito_version: v1.5.0 +:mito_docs: https://pkg.go.dev/github.com/elastic/mito@{mito_version} [id="{beatname_lc}-input-{type}"] === Common Expression Language input @@ -42,6 +44,21 @@ filebeat.inputs: }) ---- +or equivalently using the text format from ipify.org + +["source","yaml",subs="attributes"] +---- +filebeat.inputs: +# Fetch your public IP every minute. +- type: cel + interval: 1m + resource.url: https://api.ipify.org/?format=text + program: | + { + "events": [{"ip": string(get(state.url).Body)}] + } +---- + ["source","yaml",subs="attributes"] ---- filebeat.inputs: @@ -68,7 +85,7 @@ filebeat.inputs: ==== Execution -The execution environment provided for the input includes includes the function, macros and global variables provided by the mito and ext.Strings libraries. +The execution environment provided for the input includes the functions, macros, and global variables provided by the mito library. A single JSON object is provided as an input accessible through a `state` variable.
`state` contains a string `url` field and may contain arbitrary other fields configured via the input's `state` configuration. If the CEL program saves cursor states between executions of the program, the configured `state.cursor` value will be replaced by the saved cursor prior to execution. @@ -120,7 +137,7 @@ The field should be an array, but in the case of an error condition in the CEL p <2> If `cursor` is present it must either be a single object or an array with the same length as events; each element _i_ of the `cursor` will be the details for obtaining the events at and beyond event _i_ in the `events` array. If the `cursor` is a single object it will be the details for obtaining events after the last event in the `events` array and will only be retained on successful publication of all the events in the `events` array. -<3> If `rate_limit` is present it must be a map with numeric fields `rate` and `burst`. The `rate_limit` field may also have a string `error` field and other fields which will be logged. If it has an `error` field, the `rate` and `burst` will not be used to set rate limit behavior. The https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#Limit[Limit], and https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#OktaRateLimit[Okta Rate Limit policy] and https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#DraftRateLimit[Draft Rate Limit policy] documentation show how to construct this field. +<3> If `rate_limit` is present it must be a map with numeric fields `rate` and `burst`. The `rate_limit` field may also have a string `error` field and other fields which will be logged. If it has an `error` field, the `rate` and `burst` will not be used to set rate limit behavior. The {mito_docs}/lib#Limit[Limit], {mito_docs}/lib#OktaRateLimit[Okta Rate Limit policy], and {mito_docs}/lib#DraftRateLimit[Draft Rate Limit policy] documentation show how to construct this field. <4> The evaluation is repeated with the new state, after removing the events field, if the "want_more" field is present and true, and a non-zero events array is returned. @@ -130,90 +147,92 @@ The `status_code`, `header` and `rate_limit` values may be omitted if the progra The CEL input will log the complete state after evaluation when logging at the DEBUG level. This will include any sensitive or secret information kept in the `state` object, and so DEBUG level logging should not be used in production when sensitive information is retained in the `state` object. +See the <<cel-state-redact,`redact`>> configuration parameters for settings to exclude sensitive fields from DEBUG logs. ==== CEL extension libraries -As noted above the `cel` input provides function, macro and global variables to extend the language.
- -* https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#Collections[Collections] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Collate[Collate] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Drop[Drop] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Drop_Empty[Drop Empty] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Flatten[Flatten] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Max[Max] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Min[Min] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-With[With] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-With_Replace[With Replace] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-With_Update[With Update] - -* https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#Crypto[Crypto] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Base64[Base64] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Base64_Raw[Base64 Raw] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Hex[Hex] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-MD5[MD5] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-SHA_1[SHA-1] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-SHA_256[SHA-256] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-HMAC[HMAC] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-UUID[UUID] - -* https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#File[File] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Dir[Dir] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-File[File] - -* https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#HTTP[HTTP] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-HEAD[HEAD] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-GET[GET] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-GET_Request[GET Request] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-POST[POST] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-POST_Request[POST Request] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Request[Request] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Basic_Authentication[Basic Authentication] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Do_Request[Do Request] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Parse_URL[Parse URL] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Format_URL[Format URL] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Parse_Query[Parse Query] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Format_Query[Format Query] - -* https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#File[File] — the file extension is initialized with MIME handlers for "application/gzip", https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#NDJSON["application/x-ndjson"], https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#Zip["application/zip"], https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#CSVNoHeader["text/csv; header=absent"], and https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#CSVHeader["text/csv; header=present"]. 
-** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Dir[Dir] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-File[File] - -* https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#JSON[JSON] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Encode_JSON[Encode JSON] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Decode_JSON[Decode JSON] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Decode_JSON_Stream[Decode JSON Stream] - -* https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#XML[XML] — the XML extension is initialized with XML schema definitions provided via the `xsd` configuration option. -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Decode_XML[Decode JSON] - -* https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#Limit[Limit] — the rate limit extension is initialized with https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#OktaRateLimit[Okta (as "okta")] and the https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#DraftRateLimit[Draft Rate Limit (as "draft")] policies. -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Rate_Limit[Rate Limit] - -* https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#MIME[MIME] — the MIME extension is initialized with MIME handlers for "application/gzip", https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#NDJSON["application/x-ndjson"], https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#Zip["application/zip"], https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#CSVNoHeader["text/csv; header=absent"], and https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#CSVHeader["text/csv; header=present"]. -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-MIME[MIME] - -* https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#Regexp[Regexp] — the regular expression extension is initialized with the patterns specified in the user input configuration via the `regexp` field. -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-RE_Match[RE Match] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-RE_Find[RE Find] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-RE_Find_All[RE Find All] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-RE_Find_Submatch[RE Find Submatch] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-RE_Find_All_Submatch[RE Find All Submatch] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-RE_Replace_All[RE Replace All] - -* https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#Strings[Strings] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-String_Methods[String Methods] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-String_List_Methods[String List Methods] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Bytes_Methods[Bytes Methods] - -* https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#Time[Time] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Format[Format] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Parse_Time[Parse Time] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Global_Variables[Global Variables] - -* https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#Try[Try] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Try[Try] -** https://pkg.go.dev/github.com/elastic/mito@v1.4.0/lib#hdr-Is_Error[Is Error] +As noted above the `cel` input provides functions, macros, and global variables to extend the language. 
+ +* {mito_docs}/lib#Collections[Collections] +** {mito_docs}/lib#hdr-Collate[Collate] +** {mito_docs}/lib#hdr-Drop[Drop] +** {mito_docs}/lib#hdr-Drop_Empty[Drop Empty] +** {mito_docs}/lib#hdr-Flatten[Flatten] +** {mito_docs}/lib#hdr-Max[Max] +** {mito_docs}/lib#hdr-Min[Min] +** {mito_docs}/lib#hdr-With[With] +** {mito_docs}/lib#hdr-With_Replace[With Replace] +** {mito_docs}/lib#hdr-With_Update[With Update] +** {mito_docs}/lib#hdr-Zip[Zip] + +* {mito_docs}/lib#Crypto[Crypto] +** {mito_docs}/lib#hdr-Base64[Base64] +** {mito_docs}/lib#hdr-Base64_Raw[Base64 Raw] +** {mito_docs}/lib#hdr-Hex[Hex] +** {mito_docs}/lib#hdr-MD5[MD5] +** {mito_docs}/lib#hdr-SHA_1[SHA-1] +** {mito_docs}/lib#hdr-SHA_256[SHA-256] +** {mito_docs}/lib#hdr-HMAC[HMAC] +** {mito_docs}/lib#hdr-UUID[UUID] + +* {mito_docs}/lib#File[File] +** {mito_docs}/lib#hdr-Dir[Dir] +** {mito_docs}/lib#hdr-File[File] + +* {mito_docs}/lib#HTTP[HTTP] +** {mito_docs}/lib#hdr-HEAD[HEAD] +** {mito_docs}/lib#hdr-GET[GET] +** {mito_docs}/lib#hdr-GET_Request[GET Request] +** {mito_docs}/lib#hdr-POST[POST] +** {mito_docs}/lib#hdr-POST_Request[POST Request] +** {mito_docs}/lib#hdr-Request[Request] +** {mito_docs}/lib#hdr-Basic_Authentication[Basic Authentication] +** {mito_docs}/lib#hdr-Do_Request[Do Request] +** {mito_docs}/lib#hdr-Parse_URL[Parse URL] +** {mito_docs}/lib#hdr-Format_URL[Format URL] +** {mito_docs}/lib#hdr-Parse_Query[Parse Query] +** {mito_docs}/lib#hdr-Format_Query[Format Query] + +* {mito_docs}/lib#File[File] — the file extension is initialized with MIME handlers for "application/gzip", {mito_docs}/lib#NDJSON["application/x-ndjson"], {mito_docs}/lib#Zip["application/zip"], {mito_docs}/lib#CSVNoHeader["text/csv; header=absent"], and {mito_docs}/lib#CSVHeader["text/csv; header=present"]. +** {mito_docs}/lib#hdr-Dir[Dir] +** {mito_docs}/lib#hdr-File[File] + +* {mito_docs}/lib#JSON[JSON] +** {mito_docs}/lib#hdr-Encode_JSON[Encode JSON] +** {mito_docs}/lib#hdr-Decode_JSON[Decode JSON] +** {mito_docs}/lib#hdr-Decode_JSON_Stream[Decode JSON Stream] + +* {mito_docs}/lib#XML[XML] — the XML extension is initialized with XML schema definitions provided via the `xsd` configuration option. +** {mito_docs}/lib#hdr-Decode_XML[Decode XML] + +* {mito_docs}/lib#Limit[Limit] — the rate limit extension is initialized with {mito_docs}/lib#OktaRateLimit[Okta (as "okta")] and the {mito_docs}/lib#DraftRateLimit[Draft Rate Limit (as "draft")] policies. +** {mito_docs}/lib#hdr-Rate_Limit[Rate Limit] + +* {mito_docs}/lib#MIME[MIME] — the MIME extension is initialized with MIME handlers for "application/gzip", {mito_docs}/lib#NDJSON["application/x-ndjson"], {mito_docs}/lib#Zip["application/zip"], {mito_docs}/lib#CSVNoHeader["text/csv; header=absent"], and {mito_docs}/lib#CSVHeader["text/csv; header=present"]. +** {mito_docs}/lib#hdr-MIME[MIME] + +* {mito_docs}/lib#Regexp[Regexp] — the regular expression extension is initialized with the patterns specified in the user input configuration via the `regexp` field. 
+** {mito_docs}/lib#hdr-RE_Match[RE Match] +** {mito_docs}/lib#hdr-RE_Find[RE Find] +** {mito_docs}/lib#hdr-RE_Find_All[RE Find All] +** {mito_docs}/lib#hdr-RE_Find_Submatch[RE Find Submatch] +** {mito_docs}/lib#hdr-RE_Find_All_Submatch[RE Find All Submatch] +** {mito_docs}/lib#hdr-RE_Replace_All[RE Replace All] + +* {mito_docs}/lib#Strings[Strings] +** {mito_docs}/lib#hdr-String_Methods[String Methods] +** {mito_docs}/lib#hdr-String_List_Methods[String List Methods] +** {mito_docs}/lib#hdr-Bytes_Methods[Bytes Methods] + +* {mito_docs}/lib#Time[Time] +** {mito_docs}/lib#hdr-Format[Format] +** {mito_docs}/lib#hdr-Parse_Time[Parse Time] +** {mito_docs}/lib#hdr-Global_Variables[Global Variables] + +* {mito_docs}/lib#Try[Try] +** {mito_docs}/lib#hdr-Try[Try] +** {mito_docs}/lib#hdr-Is_Error[Is Error] In addition to the extensions provided in the packages listed above, a global variable `useragent` is also provided which gives the user CEL program access to the {beatname_lc} user-agent string. @@ -345,7 +364,7 @@ XML documents may require additional type information to enable correct parsing filebeat.inputs: - type: cel # Provide an XSD, 'order', for use during CEL execution (truncated for example). - regexp: + xsd: order: | @@ -631,6 +650,38 @@ Whether to use the host's local time rather that UTC for timestamping rotated lo This determines whether rotated logs should be gzip compressed. +[[cel-state-redact]] +[float] +==== `redact` + +During debug level logging, the `state` object and the resulting evaluation result are included in logs. This may result in leaking of secrets. In order to prevent this, fields may be redacted or deleted from the logged `state`. The `redact` configuration allows users to configure this field redaction behaviour. For safety reasons, if the `redact` configuration is missing, a warning is logged. + +If no redaction is required, an empty `redact.fields` configuration should be used to silence the logged warning. + +["source","yaml",subs="attributes"] +---- +- type: cel + redact: + fields: ~ +---- + +As an example, if a user-constructed Basic Authentication request is used in a CEL program, the password can be redacted like so + +["source","yaml",subs="attributes"] +---- +filebeat.inputs: +- type: cel + resource.url: http://localhost:9200/_search + state: + user: user@domain.tld + password: P@$$W0₹D + redact: + fields: password + delete: true +---- + +Note that fields under the `auth` configuration hierarchy are not exposed to the `state` and so do not need to be redacted. For this reason, it is preferable to use these for authentication, rather than the request construction shown above, where possible. [float] ==== `redact.fields` diff --git a/x-pack/filebeat/docs/inputs/input-entity-analytics.asciidoc index 22d3a67ec72..c983b607e76 100644 --- a/x-pack/filebeat/docs/inputs/input-entity-analytics.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-entity-analytics.asciidoc @@ -54,6 +54,9 @@ provider to function properly: |User.Read.All |Application + +|Device.Read.All +|Application |=== For a full guide on how to set up the necessary App Registration, permission granting, and secret configuration, follow this https://learn.microsoft.com/en-u ===== Overview The Azure AD provider periodically contacts Azure Active Directory, retrieving -updates for users and groups, updates its internal cache of user metadata and -group membership information, and ships updated user metadata to Elasticsearch.
- -Fetching and shipping updates occurs in one of two processes: **full -synchronizations** and *incremental updates*. Full synchronizations will send the -entire list of users in state, along with write markers to indicate the start -and end of the synchronization event. Incremental updates will only send data -for changed users during that event. Changes on a user can come in many forms, -whether it be a change to the user's metadata, a user was added or deleted, or -group membership was changed (either direct or transitive). +updates for users, devices, and groups, updates its internal cache of user and +device metadata and group membership information, and ships updated user and +device metadata to Elasticsearch. + +Fetching and shipping updates occurs in one of two processes: *full +synchronizations* and *incremental updates*. Full synchronizations will send the +entire list of users and devices in state, along with write markers to indicate +the start and end of the synchronization event. Incremental updates will only +send data for changed users and devices during that event. Changes on a user or +device can come in many forms, whether it be a change to the user or device +metadata, a user/device was added or deleted, or group membership was changed +(either direct or transitive). [float] ===== API Interactions -The provider periodically retrieves changes to user and group metadata from the -Microsoft Graph API for Azure Active Directory. This is done through calls to -two API endpoints: +The provider periodically retrieves changes to user, device, and group metadata +from the Microsoft Graph API for Azure Active Directory. This is done through +calls to three API endpoints: - https://learn.microsoft.com/en-us/graph/api/user-delta?view=graph-rest-1.0&tabs=http[/users/delta] +- https://learn.microsoft.com/en-us/graph/api/device-delta?view=graph-rest-1.0&tabs=http[/devices/delta] - https://learn.microsoft.com/en-us/graph/api/group-delta?view=graph-rest-1.0&tabs=http[/groups/delta] The `/delta` endpoint will provide changes that have occurred since the last call, with state being tracked through a delta token. If the /delta endpoint is -called without a delta token, it will provide a full listing of users or groups, -similar to the non-delta endpoint. Since many results may be returned, there is -a paging mechanism that is used. In the response body, there are two fields that -may appear, `@odata.nextLink` and `@odata.deltaLink`. +called without a delta token, it will provide a full listing of users, devices, +or groups, similar to the non-delta endpoint. Since many results may be returned, +a paging mechanism is used. In the response body, there are two +fields that may appear, `@odata.nextLink` and `@odata.deltaLink`. - If a `@odata.nextLink` is returned, then there are more results to fetch, and the value of this field will contain the URL which should be immediately fetched. @@ -100,16 +106,17 @@ the value of this field (a URL) should be saved for the next time updates need to be fetched (the delta token). -The group metadata will be used to enrich users with group membership information. -Direct memberships, along with transitive memberships, will provided on users. +The group metadata will be used to enrich users and devices with group membership +information. Direct memberships, along with transitive memberships, will be provided +for users and devices.
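To make the paging contract concrete, the following is a minimal Go sketch of the delta loop described above: keep following `@odata.nextLink` while more pages remain, then persist `@odata.deltaLink` as the delta token for the next incremental update. The `deltaPage` struct, the `followDelta` helper, and the bare unauthenticated client are illustrative assumptions only (a real request must carry OAuth2 credentials); this is not the provider's actual implementation.

["source","go"]
----
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// deltaPage models only the paging fields of a Graph /delta response.
// The struct is a hypothetical illustration, not provider code.
type deltaPage struct {
	NextLink  string            `json:"@odata.nextLink"`
	DeltaLink string            `json:"@odata.deltaLink"`
	Value     []json.RawMessage `json:"value"`
}

// followDelta fetches pages starting at url, following @odata.nextLink
// until a page carries @odata.deltaLink, which is returned so it can be
// saved as the delta token for the next synchronization.
func followDelta(client *http.Client, url string) ([]json.RawMessage, string, error) {
	var entities []json.RawMessage
	for url != "" {
		resp, err := client.Get(url)
		if err != nil {
			return nil, "", fmt.Errorf("error fetching delta page: %w", err)
		}
		var page deltaPage
		err = json.NewDecoder(resp.Body).Decode(&page)
		resp.Body.Close()
		if err != nil {
			return nil, "", fmt.Errorf("error decoding delta page: %w", err)
		}
		entities = append(entities, page.Value...)
		if page.DeltaLink != "" {
			// No more results; the delta link is the next sync's token.
			return entities, page.DeltaLink, nil
		}
		// More results to fetch immediately.
		url = page.NextLink
	}
	return entities, "", nil
}

func main() {
	// Hypothetical starting point; real calls need an OAuth2 bearer token.
	users, token, err := followDelta(http.DefaultClient, "https://graph.microsoft.com/v1.0/users/delta")
	if err != nil {
		fmt.Println("sync failed:", err)
		return
	}
	fmt.Printf("fetched %d records; delta token: %s\n", len(users), token)
}
----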
[float] -===== Sending User Metadata to Elasticsearch -During a full synchronization, all users stored in state will be sent to the -output, while incremental updates will only send users which have been updated. -Full synchronizations will be bounded on either side by write marker documents, -which will look something like this: +===== Sending User and Device Metadata to Elasticsearch +During a full synchronization, all users and devices stored in state will be sent +to the output, while incremental updates will only send users and devices that +have been updated. Full synchronizations will be bounded on either side by write +marker documents, which will look something like this: ["source","json",subs="attributes"] ---- { @@ -165,6 +172,77 @@ Example user document: } ---- +Device documents will show the current state of the device. + +Example device document: + +["source","json",subs="attributes"] +---- +{ + "@timestamp": "2022-11-04T09:57:19.786056-05:00", + "event": { + "action": "device-discovered" + }, + "azure_ad": { + "accountEnabled": true, + "deviceId": "2fbbb8f9-ff67-4a21-b867-a344d18a4198", + "displayName": "DESKTOP-LETW452G", + "operatingSystem": "Windows", + "operatingSystemVersion": "10.0.19043.1337", + "physicalIds": { + "extensionAttributes": { + "extensionAttribute1": "BYOD-Device" + } + }, + "alternativeSecurityIds": [ + { + "type": 2, + "identityProvider": null, + "key": "DGFSGHSGGTH345A...35DSFH0A" + } + ] + }, + "device": { + "id": "adbbe40a-0627-4328-89f1-88cac84dbc7f", + "group": [ + { + "id": "331676df-b8fd-4492-82ed-02b927f8dd80", + "name": "group1" + } + ], + "registered_owners": [ + { + "id": "5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc", + "userPrincipalName": "example.user@example.com", + "mail": "example.user@example.com", + "displayName": "Example User", + "givenName": "Example", + "surname": "User", + "jobTitle": "Software Engineer", + "mobilePhone": "123-555-1000", + "businessPhones": ["123-555-0122"] + } + ], + "registered_users": [ + { + "id": "5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc", + "userPrincipalName": "example.user@example.com", + "mail": "example.user@example.com", + "displayName": "Example User", + "givenName": "Example", + "surname": "User", + "jobTitle": "Software Engineer", + "mobilePhone": "123-555-1000", + "businessPhones": ["123-555-0122"] + } + ] + }, + "labels": { + "identity_source": "azure-1" + } +} +---- + [float] ==== Configuration diff --git a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc index 34bcc3b1a7e..52af35dd3ca 100644 --- a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc @@ -214,6 +214,8 @@ Some built-in helper functions are provided to work with the input state inside - `hmacBase64`: calculates the hmac signature of a list of strings concatenated together. Returns a base64 encoded signature. Supports sha1 or sha256. Example `[[hmac "sha256" "secret" "string1" "string2" (formatDate (now) "RFC1123")]]` - `hmac`: calculates the hmac signature of a list of strings concatenated together. Returns a hex encoded signature. Supports sha1 or sha256. Example `[[hmac "sha256" "secret" "string1" "string2" (formatDate (now) "RFC1123")]]` - `join`: joins a list using the specified separator. Example: `[[join .body.arr ","]]` +- `max`: returns the maximum of two values. +- `min`: returns the minimum of two values. - `mul`: multiplies two integers. - `now`: returns the current `time.Time` object in UTC.
Optionally, it can receive a `time.Duration` as a parameter. Example: `[[now (parseDuration "-1h")]]` returns the time at 1 hour before now. - `parseDate`: parses a date string and returns a `time.Time` in UTC. By default the expected layout is `RFC3339` but optionally can accept any of the Golang predefined layouts or a custom one. Example: `[[ parseDate "2020-11-05T12:25:32Z" ]]`, `[[ parseDate "2020-11-05T12:25:32.1234567Z" "RFC3339Nano" ]]`, `[[ (parseDate "Thu Nov 5 12:25:32 +0000 2020" "Mon Jan _2 15:04:05 -0700 2006").UTC ]]`. @@ -1560,7 +1562,7 @@ observe the activity of the input. | `httpjson_interval_total` | Total number of intervals executed. | `httpjson_interval_errors_total` | Total number of interval errors. | `httpjson_interval_execution_time` | Histogram of the interval execution time. -| `httpjson_interval_pages_total` | Histogram of the total number of pages per interval. +| `httpjson_interval_pages` | Histogram of the total number of pages per interval. | `httpjson_interval_pages_execution_time` | Histogram of the interval pages execution time. |======= diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index f6326441287..5733bcd6ad7 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -2616,7 +2616,7 @@ filebeat.inputs: # To fetch all ".log" files from a specific level of subdirectories # /var/log/*/*.log can be used. # For each file found under this path, a harvester is started. - # Make sure not file is defined twice as this can lead to unexpected behaviour. + # Make sure no file is defined twice as this can lead to unexpected behaviour. paths: - /var/log/*.log #- c:\programdata\elasticsearch\logs\* @@ -2653,7 +2653,7 @@ filebeat.inputs: # level: debug # review: 1 - # Set to true to store the additional fields as top level fields instead + # Set to true to store the additional fields as top-level fields instead # of under the "fields" sub-dictionary. In case of name conflicts with the # fields added by Filebeat itself, the custom fields overwrite the default # fields. #fields_under_root: false @@ -2667,7 +2667,7 @@ filebeat.inputs: # false. #publisher_pipeline.disable_host: false - # Ignore files which were modified more then the defined timespan in the past. + # Ignore files that were modified more than the defined timespan in the past. # ignore_older is disabled by default, so no files are ignored by setting it to 0. # Time strings like 2h (2 hours), 5m (5 minutes) can be used. #ignore_older: 0 @@ -2685,7 +2685,7 @@ filebeat.inputs: # This is especially useful for multiline log messages which can get large. #max_bytes: 10485760 - # Characters which separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed, + # Characters that separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed, # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator, # null_terminator #line_terminator: auto @@ -2708,7 +2708,7 @@ filebeat.inputs: #json.keys_under_root: false # If keys_under_root and this setting are enabled, then the values from the decoded - # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) + # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.) # in case of conflicts. #json.overwrite_keys: false @@ -2717,7 +2717,7 @@ # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`.
#json.expand_keys: false - # If this setting is enabled, Filebeat adds a "error.message" and "error.key: json" key in case of JSON + # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON # unmarshaling errors or when a text key is defined in the configuration but cannot # be used. #json.add_error_key: false @@ -2730,20 +2730,20 @@ filebeat.inputs: # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [ #multiline.pattern: ^\[ - # Defines if the pattern set under pattern should be negated or not. Default is false. + # Defines if the pattern set under the pattern should be negated or not. Default is false. #multiline.negate: false - # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern + # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern # that was (not) matched before or after or as long as a pattern is not matched based on negate. # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash #multiline.match: after - # The maximum number of lines that are combined to one event. + # The maximum number of lines that are combined into one event. # In case there are more the max_lines the additional lines are discarded. # Default is 500 #multiline.max_lines: 500 - # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event + # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event # Default is 5s. #multiline.timeout: 5s @@ -2753,7 +2753,7 @@ filebeat.inputs: # The number of lines to aggregate into a single event. #multiline.count_lines: 3 - # Do not add new line character when concatenating lines. + # Do not add new line characters when concatenating lines. #multiline.skip_newline: false # Setting tail_files to true means filebeat starts reading new files at the end @@ -2766,13 +2766,13 @@ filebeat.inputs: #pipeline: # If symlinks is enabled, symlinks are opened and harvested. The harvester is opening the - # original for harvesting but will report the symlink name as source. + # original for harvesting but will report the symlink name as the source. #symlinks: false # Backoff values define how aggressively filebeat crawls new files for updates - # The default values can be used in most cases. Backoff defines how long it is waited + # The default values can be used in most cases. Backoff defines how long it has to wait # to check a file again after EOF is reached. Default is 1s which means the file - # is checked every second if new lines were added. This leads to a near real time crawling. + # is checked every second if new lines were added. This leads to a near real-time crawling. # Every time a new line appears, backoff is reset to the initial value. #backoff: 1s @@ -2795,7 +2795,7 @@ filebeat.inputs: # Close inactive closes the file handler after the predefined period. # The period starts when the last line of the file was, not the file ModTime. - # Time strings like 2h (2 hours), 5m (5 minutes) can be used. + # Time strings like 2h (2 hours), and 5m (5 minutes) can be used. #close_inactive: 5m # Close renamed closes a file handler when the file is renamed or rotated. @@ -2807,18 +2807,18 @@ filebeat.inputs: # after scan_frequency. #close_removed: true - # Closes the file handler as soon as the harvesters reaches the end of the file. 
+  # Closes the file handler as soon as the harvester reaches the end of the file.
  # By default this option is disabled.
  # Note: Potential data loss. Make sure to read and understand the docs for this option.
  #close_eof: false

  ### State options

-  # Files for the modification data is older then clean_inactive the state from the registry is removed
+  # If a file has not been modified for longer than clean_inactive, its state is removed from the registry
  # By default this is disabled.
  #clean_inactive: 0

-  # Removes the state for file which cannot be found on disk anymore immediately
+  # Removes the state for files which cannot be found on disk anymore immediately
  #clean_removed: true

  # Close timeout closes the harvester after the predefined time.
@@ -2827,7 +2827,7 @@ filebeat.inputs:
  # Note: Potential data loss. Make sure to read and understand the docs for this option.
  #close_timeout: 0

-  # Defines if inputs is enabled
+  # Defines if inputs are enabled
  #enabled: true

#--------------------------- Filestream input ----------------------------
@@ -2889,9 +2889,22 @@ filebeat.inputs:
  #prospector.scanner.recursive_glob: true

  # If symlinks is enabled, symlinks are opened and harvested. The harvester is opening the
-  # original for harvesting but will report the symlink name as source.
+  # original for harvesting but will report the symlink name as the source.
  #prospector.scanner.symlinks: false

+  # If enabled, instead of relying on the device ID and inode values when comparing files,
+  # compare hashes of the given byte ranges in files. A file becomes an ingest target
+  # when its size grows larger than offset+length (see below). Until then it's ignored.
+  #prospector.scanner.fingerprint.enabled: false
+
+  # If fingerprint mode is enabled, sets the offset from the beginning of the file
+  # for the byte range used for computing the fingerprint value.
+  #prospector.scanner.fingerprint.offset: 0
+
+  # If fingerprint mode is enabled, sets the length of the byte range used for
+  # computing the fingerprint value. Cannot be less than 64 bytes.
+  #prospector.scanner.fingerprint.length: 1024
+
  ### Parsers configuration

  #### JSON configuration
@@ -2947,12 +2960,12 @@ filebeat.inputs:
  # Defines if the pattern set under the pattern setting should be negated or not. Default is false.
  #negate: false

-  # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern
+  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
  # that was (not) matched before or after or as long as a pattern is not matched based on negate.
  # Note: After is the equivalent to previous and before is the equivalent to next in Logstash
  #match: after

-  # The maximum number of lines that are combined to one event.
+  # The maximum number of lines that are combined into one event.
  # In case there are more than max_lines the additional lines are discarded.
  # Default is 500
  #max_lines: 500
@@ -2973,16 +2986,16 @@ filebeat.inputs:
  # The number of lines to aggregate into a single event.
  #count_lines: 3

-  # The maximum number of lines that are combined to one event.
+  # The maximum number of lines that are combined into one event.
  # In case there are more than max_lines the additional lines are discarded.
  # Default is 500
  #max_lines: 500
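To make the fingerprint idea above concrete, here is a self-contained Go sketch under stated assumptions: SHA-256 over the configured byte range, and a file that is skipped while it is smaller than offset+length. The `fileFingerprint` helper and the sample path are illustrative, not Filebeat's actual code:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// fileFingerprint hashes a fixed byte range of the file instead of
// trusting device ID + inode. Offset and length mirror the
// prospector.scanner.fingerprint.* settings above.
func fileFingerprint(path string, offset, length int64) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	info, err := f.Stat()
	if err != nil {
		return "", err
	}
	// Mirrors the documented behaviour: a file smaller than
	// offset+length is ignored until it grows.
	if info.Size() < offset+length {
		return "", fmt.Errorf("file too small for fingerprint: %d < %d",
			info.Size(), offset+length)
	}

	h := sha256.New()
	if _, err := io.Copy(h, io.NewSectionReader(f, offset, length)); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	fp, err := fileFingerprint("/var/log/example.log", 0, 1024)
	if err != nil {
		fmt.Println("skip:", err)
		return
	}
	fmt.Println("fingerprint:", fp)
}
```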
-  # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event
+  # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event
  # Default is 5s.
  #timeout: 5s

-  # Do not add new line character when concatenating lines.
+  # Do not add new line characters when concatenating lines.
  #skip_newline: false

  #### Parsing container events
@@ -3009,11 +3022,11 @@ filebeat.inputs:

  ### State options

-  # Files for the modification data is older then clean_inactive the state from the registry is removed
+  # If a file has not been modified for longer than clean_inactive, its state is removed from the registry
  # By default this is disabled.
  #clean_inactive: 0

-  # Removes the state for file which cannot be found on disk anymore immediately
+  # Removes the state for files which cannot be found on disk anymore immediately
  #clean_removed: true

  # Method to determine if two files are the same or not. By default
@@ -3034,9 +3047,9 @@ filebeat.inputs:
  # false.
  #publisher_pipeline.disable_host: false

-  # Ignore files which were modified more then the defined timespan in the past.
+  # Ignore files that were modified more than the defined timespan in the past.
  # ignore_older is disabled by default, so no files are ignored by setting it to 0.
-  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
+  # Time strings like 2h (2 hours) and 5m (5 minutes) can be used.
  #ignore_older: 0

  # Ignore files that have not been updated since the selected event.
@@ -3057,7 +3070,7 @@ filebeat.inputs:
  # This is especially useful for multiline log messages which can get large.
  #message_max_bytes: 10485760

-  # Characters which separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed,
+  # Characters that separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed,
  # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator,
  # null_terminator
  #line_terminator: auto
@@ -3067,9 +3080,9 @@ filebeat.inputs:
  #pipeline:

  # Backoff values define how aggressively filebeat crawls new files for updates
-  # The default values can be used in most cases. Backoff defines how long it is waited
+  # The default values can be used in most cases. Backoff defines how long Filebeat waits
  # to check a file again after EOF is reached. Default is 1s which means the file
-  # is checked every second if new lines were added. This leads to a near real time crawling.
+  # is checked every second if new lines were added. This leads to near real-time crawling.
  # Every time a new line appears, backoff is reset to the initial value.
  #backoff.init: 1s
@@ -3083,7 +3096,7 @@ filebeat.inputs:
  # Close inactive closes the file handler after the predefined period.
  # The period starts when the last line of the file was read, not from the file's ModTime.
-  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
+  # Time strings like 2h (2 hours) and 5m (5 minutes) can be used.
  #close.on_state_change.inactive: 5m

  # Close renamed closes a file handler when the file is renamed or rotated.
@@ -3581,12 +3594,12 @@ filebeat.inputs:
# data path.
#filebeat.registry.path: ${path.data}/registry

-# The permissions mask to apply on registry data, and meta files. The default
+# The permissions mask to apply on registry data and meta files. The default
# value is 0600. Must be a valid Unix-style file permissions mask expressed in
# octal notation. This option is not supported on Windows.
#filebeat.registry.file_permissions: 0600
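The mask is plain octal, so it maps directly onto a file mode. A minimal Go sketch, assuming a hypothetical `parsePermissions` helper rather than the code Filebeat uses:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// parsePermissions converts an octal mask such as "0600" into an
// os.FileMode. Hypothetical helper for illustration only.
func parsePermissions(s string) (os.FileMode, error) {
	n, err := strconv.ParseUint(s, 8, 32) // base 8: octal notation
	if err != nil {
		return 0, fmt.Errorf("invalid permissions mask %q: %w", s, err)
	}
	return os.FileMode(n), nil
}

func main() {
	mode, err := parsePermissions("0600")
	if err != nil {
		panic(err)
	}
	fmt.Println(mode) // -rw-------
}
```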
# The timeout value that controls when registry entries are written to disk
# (flushed). When an unwritten update exceeds this value, it triggers a write
# to disk. When flush is set to 0s, the registry is written to disk after each
# batch of events has been published successfully. The default value is 1s.
@@ -3602,7 +3615,7 @@ filebeat.inputs:

# By default Ingest pipelines are not updated if a pipeline with the same ID
# already exists. If this option is enabled Filebeat overwrites pipelines
-# everytime a new Elasticsearch connection is established.
+# every time a new Elasticsearch connection is established.
#filebeat.overwrite_pipelines: false

# How long filebeat waits on shutdown for the publisher to finish.
@@ -3627,10 +3640,10 @@ filebeat.inputs:

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
-# If this options is not defined, the hostname is used.
+# If this option is not defined, the hostname is used.
#name:

# The tags of the shipper are included in their own field with each
# transaction published. Tags make it easy to group servers by different
# logical properties.
#tags: ["service-X", "web-tier"]
@@ -3642,7 +3655,7 @@ filebeat.inputs:
#  env: staging

# If this option is set to true, the custom fields are stored as top-level
# fields in the output document instead of being grouped under a fields
# sub-dictionary. Default is false.
#fields_under_root: false

#queue:
  # Queue type by name (default 'mem')
  # The memory queue will present all available events (up to the outputs
-  # bulk_max_size) to the output, the moment the output is ready to server
+  # bulk_max_size) to the output, the moment the output is ready to serve
  # another batch of events.
  #mem:
    # Max number of events the queue can buffer.
@@ -3706,7 +3719,7 @@ filebeat.inputs:
  # length of its retry interval each time, up to this maximum.
  #max_retry_interval: 30s

# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:
@@ -3827,7 +3840,7 @@ filebeat.inputs:
#   ignore_missing: false
#   fail_on_error: true
#
# The following example copies the value of message to message_copied
#
#processors:
#  - copy_fields:
@@ -3837,7 +3850,7 @@ filebeat.inputs:
#      fail_on_error: true
#      ignore_missing: false
#
# The following example truncates the value of message to 1024 bytes
#
#processors:
#  - truncate_fields:
@@ -3934,7 +3947,7 @@ output.elasticsearch:
  # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly.
  #index: "filebeat-%{[agent.version]}"

-  # Optional ingest pipeline. By default no pipeline will be used.
+  # Optional ingest pipeline. By default, no pipeline will be used.
  #pipeline: ""

  # Optional HTTP path
@@ -4665,14 +4678,14 @@ output.elasticsearch:

# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
-# options here, or by using the `-setup` CLI flag or the `setup` command.
+# options here or by using the `-setup` CLI flag or the `setup` command.
#setup.dashboards.enabled: false

# The directory from where to read the dashboards. The default is the `kibana`
# folder in the home path.
#setup.dashboards.directory: ${path.home}/kibana

-# The URL from where to download the dashboards archive. It is used instead of
+# The URL from where to download the dashboard archive. It is used instead of
# the directory if it has a value.
#setup.dashboards.url:
@@ -4769,7 +4782,7 @@ setup.template.settings:

# Configure index lifecycle management (ILM) to manage the backing indices
# of your data streams.

-# Enable ILM support. Valid values are true, false.
+# Enable ILM support. Valid values are true or false.
#setup.ilm.enabled: true

# Set the lifecycle policy name. The default policy name is
@@ -4924,25 +4937,25 @@ logging.files:
  # The name of the files where the logs are written to.
  #name: filebeat

-  # Configure log file size limit. If limit is reached, log file will be
-  # automatically rotated
+  # Configure log file size limit. If the limit is reached, the log file will be
+  # automatically rotated.
  #rotateeverybytes: 10485760 # = 10MB

-  # Number of rotated log files to keep. Oldest files will be deleted first.
+  # Number of rotated log files to keep. The oldest files will be deleted first.
  #keepfiles: 7

  # The permissions mask to apply when rotating log files. The default value is 0600.
  # Must be a valid Unix-style file permissions mask expressed in octal notation.
  #permissions: 0600

  # Enable log file rotation on time intervals in addition to size-based rotation.
  # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
  # are boundary-aligned with minutes, hours, days, weeks, months, and years as
  # reported by the local system clock. All other intervals are calculated from the
  # Unix epoch. Defaults to disabled.
  #interval: 0

-  # Rotate existing logs on startup rather than appending to the existing
+  # Rotate existing logs on startup rather than appending them to the existing
  # file. Defaults to true.
  # rotateonstartup: true
@@ -4970,7 +4983,7 @@ logging.files:

  # Array of hosts to connect to.
  # Scheme and port can be left out and will be set to the default (http and 9200)
-  # In case you specify and additional path, the scheme is required: http://localhost:9200/path
+  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
  #hosts: ["localhost:9200"]
@@ -5017,7 +5030,7 @@ logging.files:
  # Elasticsearch after a network error. The default is 60s.
  #backoff.max: 60s

-  # Configure HTTP request timeout before failing an request to Elasticsearch.
+  # Configure HTTP request timeout before failing a request to Elasticsearch.
  #timeout: 90

  # Use SSL settings for HTTPS.
@@ -5114,15 +5127,15 @@ logging.files:

# =============================== HTTP Endpoint ================================

-# Each beat can expose internal metrics through a HTTP endpoint. For security
+# Each beat can expose internal metrics through an HTTP endpoint. For security
# reasons the endpoint is disabled by default. This feature is currently experimental.
-# Stats can be access through http://localhost:5066/stats . For pretty JSON output
+# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
# append ?pretty to the URL.

# Defines if the HTTP endpoint is enabled.
#http.enabled: false

-# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
+# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe.
# When using IP addresses, it is recommended to only use localhost.
#http.host: localhost
@@ -5132,7 +5145,7 @@ logging.files:

# Define which user should be owning the named pipe.
#http.named_pipe.user:

-# Define which the permissions that should be applied to the named pipe, use the Security
+# Define which permissions should be applied to the named pipe. Use the Security
# Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with
# `http.user`.
#http.named_pipe.security_descriptor:
diff --git a/x-pack/filebeat/filebeat.yml b/x-pack/filebeat/filebeat.yml
index 4e6dab043bb..d4080e9cccb 100644
--- a/x-pack/filebeat/filebeat.yml
+++ b/x-pack/filebeat/filebeat.yml
@@ -16,7 +16,7 @@ filebeat.inputs:
# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
-# Below are the input specific configurations.
+# Below are the input-specific configurations.

# filestream is an input for collecting log messages from files.
- type: filestream
@@ -80,7 +80,7 @@ setup.template.settings:
# all the transactions sent by a single shipper in the web interface.
#name:

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]
@@ -95,8 +95,8 @@ setup.template.settings:
# options here or by using the `setup` command.
#setup.dashboards.enabled: false

-# The URL from where to download the dashboards archive. By default this URL
-# has a value which is computed based on the Beat name and version. For released
+# The URL from where to download the dashboard archive. By default, this URL
+# has a value that is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:
@@ -178,7 +178,7 @@ processors:
#logging.level: debug

# At debug level, you can selectively enable logging only for some components.
-# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# To enable all selectors, use ["*"]. Examples of other selectors are "beat",
# "publisher", "service".
#logging.selectors: ["*"]
@@ -196,7 +196,7 @@ processors:
#monitoring.cluster_uuid:

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
diff --git a/x-pack/filebeat/input/awss3/_meta/terraform/.gitignore b/x-pack/filebeat/input/awss3/_meta/terraform/.gitignore
index 0825744a776..1af7b09a151 100644
--- a/x-pack/filebeat/input/awss3/_meta/terraform/.gitignore
+++ b/x-pack/filebeat/input/awss3/_meta/terraform/.gitignore
@@ -1,3 +1,3 @@
 terraform/
-outputs.yml
+outputs*.yml
 *.tfstate*
diff --git a/x-pack/filebeat/input/awss3/_meta/terraform/README.md b/x-pack/filebeat/input/awss3/_meta/terraform/README.md
index d5614b99a92..41100d98dad 100644
--- a/x-pack/filebeat/input/awss3/_meta/terraform/README.md
+++ b/x-pack/filebeat/input/awss3/_meta/terraform/README.md
@@ -5,6 +5,8 @@
for executing the integration tests for the `aws-s3` Filebeat input. It creates
an S3 bucket and SQS queue and configures S3 `ObjectCreated:*` notifications to
be delivered to SQS. It also creates a second S3 bucket, an SNS topic, and an SQS
queue; S3 `ObjectCreated:*` notifications are delivered to SNS, and a subscription
from the SNS topic to the SQS queue automatically places messages sent to the
topic into the queue.

+## Cloud AWS environment
+
It outputs configuration information that is consumed by the tests to
`outputs.yml`. The AWS resources are randomly named to prevent name collisions
between multiple users.
@@ -42,4 +44,40 @@
the S3 bucket and its contents.

   `terraform destroy`

+## Emulated cloud Localstack environment
+
+It outputs configuration information that is consumed by the tests to
+`outputs-localstack.yml`. The AWS resources are randomly named to prevent name collisions
+between multiple users.
+
+### Usage
+
+You must have the appropriate Localstack environment up and running in Docker.
+You can use `.ci/jobs/docker-compose.yml` to spin up the Localstack environment.
+
+1. Execute terraform in this directory to create the resources. This will also
+write `outputs-localstack.yml`. You can use `export TF_VAR_aws_region=NNNNN` in order
+to match the AWS region of the profile you are using.
+
+   `terraform apply`
+
+2. (Optional) View the output configuration.
+
+   ```yaml
+   "aws_region": "us-east-1"
+   "bucket_name": "filebeat-s3-integtest-8iok1h"
+   "queue_url": "https://localhost:4566/000000000000/filebeat-s3-integtest-8iok1h"
+   ```
+
+3. Execute the integration test.
+
+   ```
+   cd x-pack/filebeat/input/awss3
+   go test -tags aws,integration -run TestInputRun*Localstack* -v .
+   ```
+
+4. Clean up AWS resources. Execute terraform to remove the SQS queue and delete
+the S3 bucket and its contents.
+
+   `terraform destroy`
\ No newline at end of file
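Beyond terraform, the same gateway can be reached from Go. The sketch below is not part of this change; it assumes the Localstack endpoint, region, and dummy `test` credentials implied by the compose file above, and uses the aws-sdk-go-v2 endpoint override with path-style S3 addressing, mirroring `s3_use_path_style` in `localstack.tf`:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// Route every service call to the Localstack gateway; adjust the
	// URL if your gateway is not on localhost:4566.
	resolver := aws.EndpointResolverWithOptionsFunc(
		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
			return aws.Endpoint{URL: "http://localhost:4566"}, nil
		})

	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion("us-east-1"),
		config.WithEndpointResolverWithOptions(resolver),
		// Localstack accepts arbitrary static credentials.
		config.WithCredentialsProvider(
			credentials.NewStaticCredentialsProvider("test", "test", "")),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Localstack S3 is usually addressed path-style.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) { o.UsePathStyle = true })

	out, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.Buckets {
		fmt.Println(*b.Name)
	}
}
```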
diff --git a/x-pack/filebeat/input/awss3/_meta/terraform/localstack.tf b/x-pack/filebeat/input/awss3/_meta/terraform/localstack.tf
new file mode 100644
index 00000000000..2d2fcbe09ec
--- /dev/null
+++ b/x-pack/filebeat/input/awss3/_meta/terraform/localstack.tf
@@ -0,0 +1,89 @@
+provider "aws" {
+  alias                       = "localstack"
+  access_key                  = "bharat"
+  secret_key                  = "bharat"
+  region                      = "us-east-1"
+  s3_use_path_style           = true
+  skip_credentials_validation = true
+  skip_metadata_api_check     = true
+  skip_requesting_account_id  = true
+
+  endpoints {
+    apigateway     = "http://localhost:4566"
+    apigatewayv2   = "http://localhost:4566"
+    cloudformation = "http://localhost:4566"
+    cloudwatch     = "http://localhost:4566"
+    dynamodb       = "http://localhost:4566"
+    ec2            = "http://localhost:4566"
+    es             = "http://localhost:4566"
+    elasticache    = "http://localhost:4566"
+    firehose       = "http://localhost:4566"
+    iam            = "http://localhost:4566"
+    kinesis        = "http://localhost:4566"
+    lambda         = "http://localhost:4566"
+    rds            = "http://localhost:4566"
+    redshift       = "http://localhost:4566"
+    route53        = "http://localhost:4566"
+    s3             = "http://localhost:4566"
+    secretsmanager = "http://localhost:4566"
+    ses            = "http://localhost:4566"
+    sns            = "http://localhost:4566"
+    sqs            = "http://localhost:4566"
+    ssm            = "http://localhost:4566"
+    stepfunctions  = "http://localhost:4566"
+    sts            = "http://localhost:4566"
+  }
+}
+
+resource "random_string" "random_localstack" {
+  length  = 6
+  special = false
+  upper   = false
+}
+
+resource "aws_s3_bucket" "filebeat-integtest-localstack" {
+  provider      = aws.localstack
+  bucket        = "filebeat-s3-integtest-localstack-${random_string.random_localstack.result}"
+  force_destroy = true
+}
+
+resource "aws_sqs_queue" "filebeat-integtest-localstack" {
+  provider = aws.localstack
+  name     = "filebeat-sqs-integtest-localstack-${random_string.random_localstack.result}"
+  policy   = <<POLICY
…
+      truth = arg2.Int() >= 0 && arg1.Uint() < uint64(arg2.Int())
+    default:
+      return false, errBadComparison
+    }
+  } else {
+    switch k1 {
+    case boolKind, complexKind:
+      return false, errBadComparisonType
+    case floatKind:
+      truth = arg1.Float() < arg2.Float()
+    case intKind:
+      truth = arg1.Int() < arg2.Int()
+    case stringKind:
+      truth = arg1.String() < arg2.String()
+    case uintKind:
+      truth = arg1.Uint() < arg2.Uint()
+    default:
+      panic("invalid kind")
+    }
+  }
+  return truth, nil
+}
diff --git a/x-pack/filebeat/input/httpjson/value_tpl.go b/x-pack/filebeat/input/httpjson/value_tpl.go
index 4decfdc5c19..133271e726f 100644
--- a/x-pack/filebeat/input/httpjson/value_tpl.go
+++ b/x-pack/filebeat/input/httpjson/value_tpl.go
@@ -66,6 +66,8 @@ func (t *valueTpl) Unpack(in string) error {
 		"hmacBase64": hmacStringBase64,
 		"join":       join,
 		"toJSON":     toJSON,
+		"max":        max,
+		"min":        min,
 		"mul":        mul,
 		"now":        now,
 		"parseDate":  parseDate,
@@ -295,6 +297,32 @@ func div(a, b int64) int64 {
 	return a / b
 }

+func min(arg1, arg2 reflect.Value) (interface{}, error) {
+	lessThan, err := lt(arg1, arg2)
+	if err != nil {
+		return nil, err
+	}
+
+	// arg1 is < arg2.
+	if lessThan {
+		return arg1.Interface(), nil
+	}
+	return arg2.Interface(), nil
+}
+
+func max(arg1, arg2 reflect.Value) (interface{}, error) {
+	lessThan, err := lt(arg1, arg2)
+	if err != nil {
+		return nil, err
+	}
+
+	// arg1 is < arg2.
+	if lessThan {
+		return arg2.Interface(), nil
+	}
+	return arg1.Interface(), nil
+}
+
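Because the new helpers are registered as plain template functions, their behaviour can be reproduced with text/template and the input's `[[ ]]` delimiters. The sketch below is int-only for brevity and is not the patch's code; the real `min`/`max` defer to `lt` above and accept any kinds that it can compare:

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Illustrative int-only stand-ins for the reflect-based helpers.
	funcs := template.FuncMap{
		"min": func(a, b int) int {
			if a < b {
				return a
			}
			return b
		},
		"max": func(a, b int) int {
			if a > b {
				return a
			}
			return b
		},
	}
	tpl := template.Must(template.New("t").
		Delims("[[", "]]"). // httpjson value templates use [[ ]]
		Funcs(funcs).
		Parse(`min=[[min 4 1]] max=[[max 4 1]]`))
	_ = tpl.Execute(os.Stdout, nil) // prints: min=1 max=4
}
```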
 func base64Encode(values ...string) string {
 	data := strings.Join(values, "")
 	if data == "" {
diff --git a/x-pack/filebeat/input/httpjson/value_tpl_test.go b/x-pack/filebeat/input/httpjson/value_tpl_test.go
index aacbda3eede..37589cd8821 100644
--- a/x-pack/filebeat/input/httpjson/value_tpl_test.go
+++ b/x-pack/filebeat/input/httpjson/value_tpl_test.go
@@ -356,6 +356,76 @@ func TestValueTpl(t *testing.T) {
 			paramTr:     transformable{},
 			expectedVal: "4",
 		},
+		{
+			name:        "func min int",
+			value:       `[[min 4 1]]`,
+			paramCtx:    emptyTransformContext(),
+			paramTr:     transformable{},
+			expectedVal: "1",
+		},
+		{
+			name:        "func max int",
+			value:       `[[max 4 1]]`,
+			paramCtx:    emptyTransformContext(),
+			paramTr:     transformable{},
+			expectedVal: "4",
+		},
+		{
+			name:        "func max float",
+			value:       `[[max 1.23 4.666]]`,
+			paramCtx:    emptyTransformContext(),
+			paramTr:     transformable{},
+			expectedVal: "4.666",
+		},
+		{
+			name:        "func min float",
+			value:       `[[min 1.23 4.666]]`,
+			paramCtx:    emptyTransformContext(),
+			paramTr:     transformable{},
+			expectedVal: "1.23",
+		},
+		{
+			name:        "func min string",
+			value:       `[[min "a" "b"]]`,
+			paramCtx:    emptyTransformContext(),
+			paramTr:     transformable{},
+			expectedVal: "a",
+		},
+		{
+			name:        "func max string",
+			value:       `[[max "a" "b"]]`,
+			paramCtx:    emptyTransformContext(),
+			paramTr:     transformable{},
+			expectedVal: "b",
+		},
+		{
+			name:        "func min int64 unix seconds",
+			value:       `[[ min (now.Unix) 1689771139 ]]`,
+			paramCtx:    emptyTransformContext(),
+			paramTr:     transformable{},
+			expectedVal: "1689771139",
+		},
+		{
+			name:        "func min int year",
+			value:       `[[ min (now.Year) 2020 ]]`,
+			paramCtx:    emptyTransformContext(),
+			paramTr:     transformable{},
+			expectedVal: "2020",
+		},
+		{
+			name:        "func max duration",
+			value:       `[[ max (parseDuration "59m") (parseDuration "1h") ]]`,
+			paramCtx:    emptyTransformContext(),
+			paramTr:     transformable{},
+			expectedVal: "1h0m0s",
+		},
 		{
 			name:  "func sha1 hmac Hex",
 			value: `[[hmac "sha1" "secret" "string1" "string2"]]`,
diff --git a/x-pack/filebeat/input/netflow/decoder/decoder.go b/x-pack/filebeat/input/netflow/decoder/decoder.go
index 993c373c881..0e1ef3df92c 100644
--- a/x-pack/filebeat/input/netflow/decoder/decoder.go
+++ b/x-pack/filebeat/input/netflow/decoder/decoder.go
@@ -7,14 +7,13 @@ package decoder
 import (
 	"bytes"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"io"
 	"log"
 	"net"
 	"sync"
-	"github.com/pkg/errors"
-
 	"github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config"
 	"github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/protocol"
 	"github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/record"
@@ -59,7 +58,7 @@ func (p *Decoder) Start() error {
 	for _, proto := range p.protos {
 		if err := proto.Start(); err != nil {
 			p.stop()
-			return errors.Wrapf(err, "failed to start protocol version %d", proto.Version())
+			return fmt.Errorf("failed to start protocol version %d: %w", proto.Version(), err)
 		}
 	}
 	p.started = true
diff --git a/x-pack/filebeat/input/netflow/decoder/v1/v1.go b/x-pack/filebeat/input/netflow/decoder/v1/v1.go
index 8d9008c9393..e023341c4ad 100644
--- a/x-pack/filebeat/input/netflow/decoder/v1/v1.go
+++ b/x-pack/filebeat/input/netflow/decoder/v1/v1.go
@@ -7,13 +7,12 @@ package v1
 import (
 	"bytes"
 	"encoding/binary"
+	"fmt"
 	"io"
 	"log"
 	"net"
 	"time"
-	"github.com/pkg/errors"
-
"github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/fields" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/protocol" @@ -92,11 +91,11 @@ func (p *NetflowProtocol) OnPacket(buf *bytes.Buffer, source net.Addr) (flows [] numFlows, timestamp, metadata, err := p.readHeader(buf, source) if err != nil { p.logger.Printf("Failed parsing packet: %v", err) - return nil, errors.Wrap(err, "error reading netflow header") + return nil, fmt.Errorf("error reading netflow header: %w", err) } flows, err = p.flowTemplate.Apply(buf, numFlows) if err != nil { - return nil, errors.Wrap(err, "error parsing flows") + return nil, fmt.Errorf("error parsing flows: %w", err) } for i := range flows { flows[i].Exporter = metadata diff --git a/x-pack/filebeat/input/netflow/decoder/v8/v8.go b/x-pack/filebeat/input/netflow/decoder/v8/v8.go index 855b8d3c914..9fa88ea1c68 100644 --- a/x-pack/filebeat/input/netflow/decoder/v8/v8.go +++ b/x-pack/filebeat/input/netflow/decoder/v8/v8.go @@ -13,8 +13,6 @@ import ( "net" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/fields" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/protocol" @@ -323,7 +321,7 @@ func (p *NetflowV8Protocol) OnPacket(buf *bytes.Buffer, source net.Addr) (flows header, err := ReadPacketHeader(buf) if err != nil { p.logger.Printf("Failed parsing packet: %v", err) - return nil, errors.Wrap(err, "error reading V8 header") + return nil, fmt.Errorf("error reading V8 header: %w", err) } template, found := templates[header.Aggregation] if !found { @@ -333,7 +331,7 @@ func (p *NetflowV8Protocol) OnPacket(buf *bytes.Buffer, source net.Addr) (flows metadata := header.GetMetadata(source) flows, err = template.Apply(buf, int(header.Count)) if err != nil { - return nil, errors.Wrapf(err, "unable to decode V8 flows of type %d", header.Aggregation) + return nil, fmt.Errorf("unable to decode V8 flows of type %d: %w", header.Aggregation, err) } for i := range flows { flows[i].Exporter = metadata diff --git a/x-pack/filebeat/input/netflow/decoder/v9/v9.go b/x-pack/filebeat/input/netflow/decoder/v9/v9.go index e70bda4ab3b..fdb46076c87 100644 --- a/x-pack/filebeat/input/netflow/decoder/v9/v9.go +++ b/x-pack/filebeat/input/netflow/decoder/v9/v9.go @@ -6,12 +6,11 @@ package v9 import ( "bytes" + "fmt" "log" "net" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/protocol" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/record" @@ -76,7 +75,7 @@ func (p *NetflowV9Protocol) OnPacket(buf *bytes.Buffer, source net.Addr) (flows header, payload, numFlowSets, err := p.decoder.ReadPacketHeader(buf) if err != nil { p.logger.Printf("Unable to read V9 header: %v", err) - return nil, errors.Wrapf(err, "error reading header") + return nil, fmt.Errorf("error reading header: %w", err) } buf = payload @@ -105,7 +104,7 @@ func (p *NetflowV9Protocol) OnPacket(buf *bytes.Buffer, source net.Addr) (flows f, err := p.parseSet(set.SetID, session, body) if err != nil { p.logger.Printf("Error parsing set %d: %v", set.SetID, err) - return nil, errors.Wrapf(err, "error parsing set") + return nil, fmt.Errorf("error parsing set: %w", err) } flows = append(flows, f...) 
} diff --git a/x-pack/filebeat/input/netflow/definitions.go b/x-pack/filebeat/input/netflow/definitions.go index 267abef5a92..62e6dc3f1b0 100644 --- a/x-pack/filebeat/input/netflow/definitions.go +++ b/x-pack/filebeat/input/netflow/definitions.go @@ -5,13 +5,13 @@ package netflow import ( + "errors" "fmt" "io/ioutil" "math" "os" "strconv" - "github.com/pkg/errors" "gopkg.in/yaml.v2" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/fields" @@ -64,7 +64,7 @@ func LoadFieldDefinitions(yaml interface{}) (defs fields.FieldDict, err error) { defs = fields.FieldDict{} if !isIPFIX { if err := loadFields(tree, 0, defs); err != nil { - return nil, errors.Wrap(err, "failed to load NetFlow fields") + return nil, fmt.Errorf("failed to load NetFlow fields: %w", err) } return defs, nil } @@ -81,7 +81,7 @@ func LoadFieldDefinitions(yaml interface{}) (defs fields.FieldDict, err error) { return nil, fmt.Errorf("IPFIX fields for pem=%d malformed", pem) } if err := loadFields(tree, uint32(pem), defs); err != nil { - return nil, errors.Wrapf(err, "failed to load IPFIX fields for pem=%d", pem) + return nil, fmt.Errorf("failed to load IPFIX fields for pem=%d: %w", pem, err) } } return defs, nil @@ -101,7 +101,7 @@ func LoadFieldDefinitionsFromFile(path string) (defs fields.FieldDict, err error } var tree interface{} if err := yaml.Unmarshal(contents, &tree); err != nil { - return nil, errors.Wrap(err, "unable to parse YAML") + return nil, fmt.Errorf("unable to parse YAML: %w", err) } return LoadFieldDefinitions(tree) } diff --git a/x-pack/filebeat/input/netflow/input.go b/x-pack/filebeat/input/netflow/input.go index 16aa958396d..97f9931f325 100644 --- a/x-pack/filebeat/input/netflow/input.go +++ b/x-pack/filebeat/input/netflow/input.go @@ -6,12 +6,11 @@ package netflow import ( "bytes" + "fmt" "net" "sync" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/filebeat/channel" "github.com/elastic/beats/v7/filebeat/harvester" "github.com/elastic/beats/v7/filebeat/input" @@ -108,7 +107,7 @@ func NewInput( for _, yamlPath := range config.CustomDefinitions { f, err := LoadFieldDefinitionsFromFile(yamlPath) if err != nil { - return nil, errors.Wrapf(err, "failed parsing custom field definitions from file '%s'", yamlPath) + return nil, fmt.Errorf("failed parsing custom field definitions from file '%s': %w", yamlPath, err) } customFields = append(customFields, f) } @@ -120,7 +119,7 @@ func NewInput( WithSequenceResetEnabled(config.DetectSequenceReset). 
WithSharedTemplates(config.ShareTemplates)) if err != nil { - return nil, errors.Wrapf(err, "error initializing netflow decoder") + return nil, fmt.Errorf("error initializing netflow decoder: %w", err) } input := &netflowInput{ diff --git a/x-pack/filebeat/input/o365audit/auth/auth.go b/x-pack/filebeat/input/o365audit/auth/auth.go index 69899e34031..6b0fea75c12 100644 --- a/x-pack/filebeat/input/o365audit/auth/auth.go +++ b/x-pack/filebeat/input/o365audit/auth/auth.go @@ -5,8 +5,9 @@ package auth import ( + "fmt" + "github.com/Azure/go-autorest/autorest/adal" - "github.com/pkg/errors" ) // TokenProvider is the interface that wraps an authentication mechanism and @@ -28,7 +29,7 @@ type servicePrincipalToken adal.ServicePrincipalToken func (provider *servicePrincipalToken) Token() (string, error) { inner := (*adal.ServicePrincipalToken)(provider) if err := inner.EnsureFresh(); err != nil { - return "", errors.Wrap(err, "refreshing spt token") + return "", fmt.Errorf("refreshing spt token: %w", err) } token := inner.Token() return token.OAuthToken(), nil diff --git a/x-pack/filebeat/input/o365audit/auth/cert.go b/x-pack/filebeat/input/o365audit/auth/cert.go index 1d0dc9f7526..f4912e0afc4 100644 --- a/x-pack/filebeat/input/o365audit/auth/cert.go +++ b/x-pack/filebeat/input/o365audit/auth/cert.go @@ -10,7 +10,6 @@ import ( "fmt" "github.com/Azure/go-autorest/autorest/adal" - "github.com/pkg/errors" "github.com/elastic/elastic-agent-libs/transport/tlscommon" ) @@ -22,11 +21,11 @@ func NewProviderFromCertificate( conf tlscommon.CertificateConfig) (sptp TokenProvider, err error) { cert, privKey, err := loadConfigCerts(conf) if err != nil { - return nil, errors.Wrap(err, "failed loading certificates") + return nil, fmt.Errorf("failed loading certificates: %w", err) } oauth, err := adal.NewOAuthConfig(endpoint, tenantID) if err != nil { - return nil, errors.Wrap(err, "error generating OAuthConfig") + return nil, fmt.Errorf("error generating OAuthConfig: %w", err) } spt, err := adal.NewServicePrincipalTokenFromCertificate( @@ -46,14 +45,14 @@ func NewProviderFromCertificate( func loadConfigCerts(cfg tlscommon.CertificateConfig) (cert *x509.Certificate, key *rsa.PrivateKey, err error) { tlsCert, err := tlscommon.LoadCertificate(&cfg) if err != nil { - return nil, nil, errors.Wrapf(err, "error loading X509 certificate from '%s'", cfg.Certificate) + return nil, nil, fmt.Errorf("error loading X509 certificate from '%s': %w", cfg.Certificate, err) } if tlsCert == nil || len(tlsCert.Certificate) == 0 { return nil, nil, fmt.Errorf("no certificates loaded from '%s'", cfg.Certificate) } cert, err = x509.ParseCertificate(tlsCert.Certificate[0]) if err != nil { - return nil, nil, errors.Wrapf(err, "error parsing X509 certificate from '%s'", cfg.Certificate) + return nil, nil, fmt.Errorf("error parsing X509 certificate from '%s': %w", cfg.Certificate, err) } if tlsCert.PrivateKey == nil { return nil, nil, fmt.Errorf("failed loading private key from '%s'", cfg.Key) diff --git a/x-pack/filebeat/input/o365audit/auth/secret.go b/x-pack/filebeat/input/o365audit/auth/secret.go index c34d6d48cc6..98b5c9dbfda 100644 --- a/x-pack/filebeat/input/o365audit/auth/secret.go +++ b/x-pack/filebeat/input/o365audit/auth/secret.go @@ -5,8 +5,9 @@ package auth import ( + "fmt" + "github.com/Azure/go-autorest/autorest/adal" - "github.com/pkg/errors" ) // NewProviderFromClientSecret returns a token provider that uses a secret @@ -14,7 +15,7 @@ import ( func NewProviderFromClientSecret(endpoint, resource, applicationID, tenantID, 
secret string) (p TokenProvider, err error) { oauth, err := adal.NewOAuthConfig(endpoint, tenantID) if err != nil { - return nil, errors.Wrap(err, "error generating OAuthConfig") + return nil, fmt.Errorf("error generating OAuthConfig: %w", err) } spt, err := adal.NewServicePrincipalToken(*oauth, applicationID, secret, resource) if err != nil { diff --git a/x-pack/filebeat/input/o365audit/config.go b/x-pack/filebeat/input/o365audit/config.go index d6178e9cbcc..dd419c54679 100644 --- a/x-pack/filebeat/input/o365audit/config.go +++ b/x-pack/filebeat/input/o365audit/config.go @@ -5,12 +5,11 @@ package o365audit import ( + "errors" "fmt" "net/url" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/x-pack/filebeat/input/o365audit/auth" "github.com/elastic/elastic-agent-libs/transport/tlscommon" ) @@ -147,16 +146,16 @@ func (c *Config) Validate() (err error) { } if hasCert { if err = c.CertificateConfig.Validate(); err != nil { - return errors.Wrap(err, "invalid certificate config") + return fmt.Errorf("invalid certificate config: %w", err) } } c.API.Resource, err = forceURLScheme(c.API.Resource, "https") if err != nil { - return errors.Wrapf(err, "resource '%s' is not a valid URL", c.API.Resource) + return fmt.Errorf("resource '%s' is not a valid URL: %w", c.API.Resource, err) } c.API.AuthenticationEndpoint, err = forceURLScheme(c.API.AuthenticationEndpoint, "https") if err != nil { - return errors.Wrapf(err, "authentication_endpoint '%s' is not a valid URL", c.API.AuthenticationEndpoint) + return fmt.Errorf("authentication_endpoint '%s' is not a valid URL: %w", c.API.AuthenticationEndpoint, err) } return nil } diff --git a/x-pack/filebeat/input/o365audit/contentblob.go b/x-pack/filebeat/input/o365audit/contentblob.go index 598ca5f8b9a..55283aaa39c 100644 --- a/x-pack/filebeat/input/o365audit/contentblob.go +++ b/x-pack/filebeat/input/o365audit/contentblob.go @@ -6,12 +6,12 @@ package o365audit import ( "encoding/json" + "errors" "fmt" "net/http" "time" "github.com/Azure/go-autorest/autorest" - "github.com/pkg/errors" "github.com/elastic/beats/v7/x-pack/filebeat/input/o365audit/poll" "github.com/elastic/elastic-agent-libs/mapstr" @@ -53,13 +53,13 @@ func (c contentBlob) OnResponse(response *http.Response) (actions []poll.Action) } var raws []json.RawMessage if err := readJSONBody(response, &raws); err != nil { - return append(actions, poll.Terminate(errors.Wrap(err, "reading body failed"))) + return append(actions, poll.Terminate(fmt.Errorf("reading body failed: %w", err))) } entries := make([]mapstr.M, len(raws)) for idx, raw := range raws { var entry mapstr.M if err := json.Unmarshal(raw, &entry); err != nil { - return append(actions, poll.Terminate(errors.Wrap(err, "decoding json failed"))) + return append(actions, poll.Terminate(fmt.Errorf("decoding json failed: %w", err))) } entries[idx] = entry id, _ := getString(entry, "Id") diff --git a/x-pack/filebeat/input/o365audit/contentblob_test.go b/x-pack/filebeat/input/o365audit/contentblob_test.go index 223c2aa971a..1a71595d7e9 100644 --- a/x-pack/filebeat/input/o365audit/contentblob_test.go +++ b/x-pack/filebeat/input/o365audit/contentblob_test.go @@ -5,10 +5,10 @@ package o365audit import ( + "errors" "testing" "time" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/elastic/beats/v7/libbeat/beat" diff --git a/x-pack/filebeat/input/o365audit/dates.go b/x-pack/filebeat/input/o365audit/dates.go index ff6eb5db989..6e6c99c80ec 100644 --- a/x-pack/filebeat/input/o365audit/dates.go +++ 
b/x-pack/filebeat/input/o365audit/dates.go @@ -5,11 +5,10 @@ package o365audit import ( + "errors" "fmt" "time" - "github.com/pkg/errors" - "github.com/elastic/elastic-agent-libs/mapstr" ) diff --git a/x-pack/filebeat/input/o365audit/input.go b/x-pack/filebeat/input/o365audit/input.go index cf92f1a4f46..3793ce43d6a 100644 --- a/x-pack/filebeat/input/o365audit/input.go +++ b/x-pack/filebeat/input/o365audit/input.go @@ -7,11 +7,11 @@ package o365audit import ( "context" "encoding/json" + "fmt" "time" "github.com/Azure/go-autorest/autorest" "github.com/joeshaw/multierror" - "github.com/pkg/errors" v2 "github.com/elastic/beats/v7/filebeat/input/v2" cursor "github.com/elastic/beats/v7/filebeat/input/v2/input-cursor" @@ -70,7 +70,7 @@ func Plugin(log *logp.Logger, store cursor.StateStore) v2.Plugin { func configure(cfg *conf.C) ([]cursor.Source, cursor.Input, error) { config := defaultConfig() if err := cfg.Unpack(&config); err != nil { - return nil, nil, errors.Wrap(err, "reading config") + return nil, nil, fmt.Errorf("reading config: %w", err) } var sources []cursor.Source @@ -100,7 +100,7 @@ func (inp *o365input) Test(src cursor.Source, ctx v2.TestContext) error { } if _, err := auth.Token(); err != nil { - return errors.Wrapf(err, "unable to acquire authentication token for tenant:%s", tenantID) + return fmt.Errorf("unable to acquire authentication token for tenant:%s: %w", tenantID, err) } return nil @@ -150,7 +150,7 @@ func (inp *o365input) runOnce( } if _, err := tokenProvider.Token(); err != nil { - return errors.Wrapf(err, "unable to acquire authentication token for tenant:%s", stream.tenantID) + return fmt.Errorf("unable to acquire authentication token for tenant:%s: %w", stream.tenantID, err) } config := &inp.config @@ -171,7 +171,7 @@ func (inp *o365input) runOnce( ), ) if err != nil { - return errors.Wrap(err, "failed to create API poller") + return fmt.Errorf("failed to create API poller: %w", err) } start := initCheckpoint(log, cursor, config.API.MaxRetention) @@ -243,7 +243,7 @@ func (env apiEnvironment) toBeatEvent(raw json.RawMessage, doc mapstr.M) beat.Ev ts, err := getDateKey(doc, "CreationTime", apiDateFormats) if err != nil { ts = time.Now() - errs = append(errs, errors.Wrap(err, "failed parsing CreationTime")) + errs = append(errs, fmt.Errorf("failed parsing CreationTime: %w", err)) } b := beat.Event{ Timestamp: ts, diff --git a/x-pack/filebeat/input/o365audit/listblobs.go b/x-pack/filebeat/input/o365audit/listblobs.go index ebdbddf79f6..58437b9f614 100644 --- a/x-pack/filebeat/input/o365audit/listblobs.go +++ b/x-pack/filebeat/input/o365audit/listblobs.go @@ -6,6 +6,7 @@ package o365audit import ( "encoding/json" + "errors" "fmt" "io/ioutil" "net/http" @@ -13,7 +14,6 @@ import ( "time" "github.com/Azure/go-autorest/autorest" - "github.com/pkg/errors" "github.com/elastic/beats/v7/x-pack/filebeat/input/o365audit/poll" ) @@ -280,10 +280,10 @@ func readJSONBody(response *http.Response, dest interface{}) error { autorest.ByClosing()) body, err := ioutil.ReadAll(response.Body) if err != nil { - return errors.Wrap(err, "reading body failed") + return fmt.Errorf("reading body failed: %w", err) } if err = json.Unmarshal(body, dest); err != nil { - return errors.Wrap(err, "decoding json failed") + return fmt.Errorf("decoding json failed: %w", err) } return nil } diff --git a/x-pack/filebeat/input/o365audit/poll/poll.go b/x-pack/filebeat/input/o365audit/poll/poll.go index d32ea4ec135..3f616d433b3 100644 --- a/x-pack/filebeat/input/o365audit/poll/poll.go +++ 
b/x-pack/filebeat/input/o365audit/poll/poll.go @@ -6,12 +6,12 @@ package poll import ( "context" + "errors" "fmt" "net/http" "time" "github.com/Azure/go-autorest/autorest" - "github.com/pkg/errors" "github.com/elastic/beats/v7/x-pack/filebeat/input/o365audit/auth" "github.com/elastic/elastic-agent-libs/logp" @@ -90,14 +90,14 @@ func (r *Poller) fetchWithDelay(item Transaction, minDelay time.Duration) error if r.tp != nil { token, err := r.tp.Token() if err != nil { - return errors.Wrap(err, "failed getting a token") + return fmt.Errorf("failed getting a token: %w", err) } decorators = append(decorators, autorest.WithBearerAuthorization(token)) } request, err := autorest.Prepare(&http.Request{}, decorators...) if err != nil { - return errors.Wrap(err, "failed preparing request") + return fmt.Errorf("failed preparing request: %w", err) } delay := max(item.Delay(), minDelay) r.log.Debugf(" -- wait %s for %s", delay, request.URL.String()) @@ -115,7 +115,7 @@ func (r *Poller) fetchWithDelay(item Transaction, minDelay time.Duration) error for _, act := range acts { if err = act(r); err != nil { - return errors.Wrapf(err, "error acting on %+v", act) + return fmt.Errorf("error acting on %+v: %w", act, err) } } @@ -242,7 +242,7 @@ func Terminate(err error) Action { if err == nil { return errors.New("polling terminated without a specific error") } - return errors.Wrap(err, "polling terminated due to error") + return fmt.Errorf("polling terminated due to error: %w", err) } } diff --git a/x-pack/filebeat/module/cisco/shared/gen-ecs-mapping-docs.go b/x-pack/filebeat/module/cisco/shared/gen-ecs-mapping-docs.go index 929a2b9dfb9..f4f6ccb34b5 100644 --- a/x-pack/filebeat/module/cisco/shared/gen-ecs-mapping-docs.go +++ b/x-pack/filebeat/module/cisco/shared/gen-ecs-mapping-docs.go @@ -8,13 +8,12 @@ package main import ( "encoding/csv" + "errors" "flag" "fmt" "io" "os" "sort" - - "github.com/pkg/errors" ) var outputFile = flag.String("output", "ftd-ecs-mappings.asciidoc", "Output file") @@ -114,7 +113,7 @@ func loadMappings(reader io.Reader) (m idMappings, err error) { break } if err != nil { - return m, errors.Wrapf(err, "failed reading line %d", lineNum) + return m, fmt.Errorf("failed reading line %d: %w", lineNum, err) } if len(record) < 3 { return m, fmt.Errorf("line %d has unexpected number of columns: %d", lineNum, len(record)) diff --git a/x-pack/filebeat/module/cisco/shared/gen-ftd-ecs-mapping.go b/x-pack/filebeat/module/cisco/shared/gen-ftd-ecs-mapping.go index 6ad9a255212..8ce78bbf1df 100644 --- a/x-pack/filebeat/module/cisco/shared/gen-ftd-ecs-mapping.go +++ b/x-pack/filebeat/module/cisco/shared/gen-ftd-ecs-mapping.go @@ -8,6 +8,7 @@ package main import ( "encoding/csv" + "errors" "flag" "fmt" "io" @@ -16,8 +17,6 @@ import ( "unicode" "gopkg.in/yaml.v2" - - "github.com/pkg/errors" ) var ( @@ -142,7 +141,7 @@ func generate() error { content = append(content, end...) n, err := outHandle.Write(content) if err != nil { - return errors.Wrap(err, "failed writing output file") + return fmt.Errorf("failed writing output file: %w", err) } if n != len(content) { return fmt.Errorf("short write on output file. 
expected=%d, written=%d", len(content), n) @@ -160,7 +159,7 @@ func loadMappings(reader io.Reader) (m mappings, err error) { break } if err != nil { - return m, errors.Wrapf(err, "failed reading line %d", lineNum) + return m, fmt.Errorf("failed reading line %d: %w", lineNum, err) } if len(record) < 3 { return m, fmt.Errorf("line %d has unexpected number of columns: %d", lineNum, len(record)) diff --git a/x-pack/filebeat/module/fortinet/firewall/ingest/pipeline.yml b/x-pack/filebeat/module/fortinet/firewall/ingest/pipeline.yml index 92037a746b1..244b5e1726e 100644 --- a/x-pack/filebeat/module/fortinet/firewall/ingest/pipeline.yml +++ b/x-pack/filebeat/module/fortinet/firewall/ingest/pipeline.yml @@ -7,6 +7,11 @@ processors: field: message patterns: - '%{SYSLOG5424PRI}%{GREEDYDATA:syslog5424_sd}$' +- gsub: + field: syslog5424_sd + pattern: "\u0000" + replacement: "" + if: ctx.syslog5424_sd != null - script: lang: painless if: ctx.syslog5424_sd != null diff --git a/x-pack/filebeat/module/fortinet/firewall/test/event-nul.log b/x-pack/filebeat/module/fortinet/firewall/test/event-nul.log new file mode 100644 index 00000000000..4d7d8613adc Binary files /dev/null and b/x-pack/filebeat/module/fortinet/firewall/test/event-nul.log differ diff --git a/x-pack/filebeat/module/fortinet/firewall/test/event-nul.log-expected.json b/x-pack/filebeat/module/fortinet/firewall/test/event-nul.log-expected.json new file mode 100644 index 00000000000..89c0254c914 --- /dev/null +++ b/x-pack/filebeat/module/fortinet/firewall/test/event-nul.log-expected.json @@ -0,0 +1,167 @@ +[ + { + "@timestamp": "2020-04-23T12:32:48.000-05:00", + "event.action": "FSSO-logon", + "event.category": [ + "authentication" + ], + "event.code": "0102043014", + "event.dataset": "fortinet.firewall", + "event.kind": "event", + "event.module": "fortinet", + "event.outcome": "success", + "event.start": "2020-04-18T12:32:48.439-05:00", + "event.timezone": "-0500", + "event.type": [ + "start", + "user" + ], + "fileset.name": "firewall", + "fortinet.firewall.action": "FSSO-logon", + "fortinet.firewall.server": "elasticserver", + "fortinet.firewall.subtype": "user", + "fortinet.firewall.type": "event", + "fortinet.firewall.vd": "root", + "input.type": "log", + "log.level": "notice", + "log.offset": 0, + "message": "log contains NUL terminators", + "network.type": "ipv4", + "observer.name": "testswitch3", + "observer.product": "Fortigate", + "observer.serial_number": "someotherrouteridagain", + "observer.type": "firewall", + "observer.vendor": "Fortinet", + "related.ip": [ + "10.10.10.10" + ], + "related.user": [ + "elasticouser" + ], + "rule.description": "FSSO logon authentication status", + "service.type": "fortinet", + "source.ip": "10.10.10.10", + "source.user.name": "elasticouser", + "tags": [ + "fortinet-firewall", + "forwarded" + ] + }, + { + "@timestamp": "2020-04-23T12:32:47.000-05:00", + "destination.ip": "8.8.4.4", + "destination.port": 500, + "event.action": "negotiate", + "event.category": [ + "network" + ], + "event.code": "0101037124", + "event.dataset": "fortinet.firewall", + "event.kind": "event", + "event.module": "fortinet", + "event.outcome": "failure", + "event.reason": "peer SA proposal not match local policy", + "event.start": "2020-04-18T12:32:48.339-05:00", + "event.timezone": "-0500", + "event.type": [ + "connection" + ], + "fileset.name": "firewall", + "fortinet.firewall.action": "negotiate", + "fortinet.firewall.cookies": "345hkjhdrs87/0000000000000000", + "fortinet.firewall.outintf": "wan2", + 
"fortinet.firewall.peer_notif": "NOT-APPLICABLE", + "fortinet.firewall.status": "negotiate_error", + "fortinet.firewall.subtype": "vpn", + "fortinet.firewall.type": "event", + "fortinet.firewall.vd": "root", + "input.type": "log", + "log.level": "error", + "log.offset": 361, + "message": "IPsec phase 1 error", + "network.type": "ipv4", + "observer.name": "testswitch3", + "observer.product": "Fortigate", + "observer.serial_number": "someotherrouteridagain", + "observer.type": "firewall", + "observer.vendor": "Fortinet", + "related.ip": [ + "175.16.199.1", + "8.8.4.4" + ], + "rule.description": "IPsec phase 1 error", + "service.type": "fortinet", + "source.geo.city_name": "Changchun", + "source.geo.continent_name": "Asia", + "source.geo.country_iso_code": "CN", + "source.geo.country_name": "China", + "source.geo.location.lat": 43.88, + "source.geo.location.lon": 125.3228, + "source.geo.region_iso_code": "CN-22", + "source.geo.region_name": "Jilin Sheng", + "source.ip": "175.16.199.1", + "source.port": 500, + "tags": [ + "fortinet-firewall", + "forwarded" + ] + }, + { + "@timestamp": "2020-04-23T12:32:31.000-05:00", + "destination.ip": "8.4.5.4", + "destination.port": 500, + "event.action": "negotiate", + "event.category": [ + "network" + ], + "event.code": "0101037127", + "event.dataset": "fortinet.firewall", + "event.kind": "event", + "event.module": "fortinet", + "event.outcome": "success", + "event.start": "2020-04-18T12:32:31.628-05:00", + "event.timezone": "-0500", + "event.type": [ + "connection" + ], + "fileset.name": "firewall", + "fortinet.firewall.action": "negotiate", + "fortinet.firewall.cookies": "df868dsg876d/0000000000000000", + "fortinet.firewall.init": "local", + "fortinet.firewall.mode": "main", + "fortinet.firewall.outintf": "wan1", + "fortinet.firewall.result": "OK", + "fortinet.firewall.role": "initiator", + "fortinet.firewall.stage": "1", + "fortinet.firewall.status": "success", + "fortinet.firewall.subtype": "vpn", + "fortinet.firewall.type": "event", + "fortinet.firewall.vd": "root", + "fortinet.firewall.vpntunnel": "elasticvpn", + "input.type": "log", + "log.level": "notice", + "log.offset": 935, + "message": "progress IPsec phase 1", + "network.direction": "outbound", + "network.type": "ipv4", + "observer.name": "testswitch3", + "observer.product": "Fortigate", + "observer.serial_number": "someotherrouteridagain", + "observer.type": "firewall", + "observer.vendor": "Fortinet", + "related.ip": [ + "1.128.3.4", + "8.4.5.4" + ], + "rule.description": "Progress IPsec phase 1", + "service.type": "fortinet", + "source.as.number": 1221, + "source.as.organization.name": "Telstra Pty Ltd", + "source.ip": "1.128.3.4", + "source.port": 500, + "tags": [ + "fortinet-firewall", + "forwarded" + ] + } +] \ No newline at end of file diff --git a/x-pack/filebeat/modules.d/activemq.yml.disabled b/x-pack/filebeat/modules.d/activemq.yml.disabled index 82c70b16947..e19824686ae 100644 --- a/x-pack/filebeat/modules.d/activemq.yml.disabled +++ b/x-pack/filebeat/modules.d/activemq.yml.disabled @@ -1,5 +1,5 @@ # Module: activemq -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-activemq.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-activemq.html - module: activemq # Audit logs diff --git a/x-pack/filebeat/modules.d/aws.yml.disabled b/x-pack/filebeat/modules.d/aws.yml.disabled index 6a49839c116..c730b8aea07 100644 --- a/x-pack/filebeat/modules.d/aws.yml.disabled +++ b/x-pack/filebeat/modules.d/aws.yml.disabled @@ -1,5 +1,5 @@ # 
Module: aws -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-aws.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-aws.html - module: aws cloudtrail: diff --git a/x-pack/filebeat/modules.d/awsfargate.yml.disabled b/x-pack/filebeat/modules.d/awsfargate.yml.disabled index 57a5e419135..c2e96fd2f93 100644 --- a/x-pack/filebeat/modules.d/awsfargate.yml.disabled +++ b/x-pack/filebeat/modules.d/awsfargate.yml.disabled @@ -1,5 +1,5 @@ # Module: awsfargate -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-awsfargate.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-awsfargate.html - module: awsfargate log: diff --git a/x-pack/filebeat/modules.d/azure.yml.disabled b/x-pack/filebeat/modules.d/azure.yml.disabled index 04fe209e3f7..97eb4b9e461 100644 --- a/x-pack/filebeat/modules.d/azure.yml.disabled +++ b/x-pack/filebeat/modules.d/azure.yml.disabled @@ -1,5 +1,5 @@ # Module: azure -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-azure.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-azure.html - module: azure # All logs diff --git a/x-pack/filebeat/modules.d/barracuda.yml.disabled b/x-pack/filebeat/modules.d/barracuda.yml.disabled index 6327b8d6a75..3926a2fec96 100644 --- a/x-pack/filebeat/modules.d/barracuda.yml.disabled +++ b/x-pack/filebeat/modules.d/barracuda.yml.disabled @@ -1,5 +1,5 @@ # Module: barracuda -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-barracuda.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-barracuda.html - module: barracuda waf: diff --git a/x-pack/filebeat/modules.d/bluecoat.yml.disabled b/x-pack/filebeat/modules.d/bluecoat.yml.disabled index 98a4cef099b..28badfd0def 100644 --- a/x-pack/filebeat/modules.d/bluecoat.yml.disabled +++ b/x-pack/filebeat/modules.d/bluecoat.yml.disabled @@ -1,5 +1,5 @@ # Module: bluecoat -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-bluecoat.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-bluecoat.html - module: bluecoat director: diff --git a/x-pack/filebeat/modules.d/cef.yml.disabled b/x-pack/filebeat/modules.d/cef.yml.disabled index cda083f4a5e..1834c8f4dba 100644 --- a/x-pack/filebeat/modules.d/cef.yml.disabled +++ b/x-pack/filebeat/modules.d/cef.yml.disabled @@ -1,5 +1,5 @@ # Module: cef -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-cef.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-cef.html - module: cef log: diff --git a/x-pack/filebeat/modules.d/checkpoint.yml.disabled b/x-pack/filebeat/modules.d/checkpoint.yml.disabled index 62d30a992b7..595beccdbff 100644 --- a/x-pack/filebeat/modules.d/checkpoint.yml.disabled +++ b/x-pack/filebeat/modules.d/checkpoint.yml.disabled @@ -1,5 +1,5 @@ # Module: checkpoint -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-checkpoint.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-checkpoint.html - module: checkpoint firewall: diff --git a/x-pack/filebeat/modules.d/cisco.yml.disabled b/x-pack/filebeat/modules.d/cisco.yml.disabled index 6bc846f93f6..b2aca39798e 100644 --- a/x-pack/filebeat/modules.d/cisco.yml.disabled +++ b/x-pack/filebeat/modules.d/cisco.yml.disabled @@ -1,5 +1,5 @@ # Module: cisco -# Docs: 
https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-cisco.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-cisco.html - module: cisco asa: diff --git a/x-pack/filebeat/modules.d/coredns.yml.disabled b/x-pack/filebeat/modules.d/coredns.yml.disabled index fb7e9995130..bfcc3bba412 100644 --- a/x-pack/filebeat/modules.d/coredns.yml.disabled +++ b/x-pack/filebeat/modules.d/coredns.yml.disabled @@ -1,5 +1,5 @@ # Module: coredns -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-coredns.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-coredns.html - module: coredns # Fileset for native deployment diff --git a/x-pack/filebeat/modules.d/crowdstrike.yml.disabled b/x-pack/filebeat/modules.d/crowdstrike.yml.disabled index aea362f2e40..8f30c4ed899 100644 --- a/x-pack/filebeat/modules.d/crowdstrike.yml.disabled +++ b/x-pack/filebeat/modules.d/crowdstrike.yml.disabled @@ -1,5 +1,5 @@ # Module: crowdstrike -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-crowdstrike.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-crowdstrike.html - module: crowdstrike diff --git a/x-pack/filebeat/modules.d/cyberarkpas.yml.disabled b/x-pack/filebeat/modules.d/cyberarkpas.yml.disabled index f2168e9d453..8b4ddf9b814 100644 --- a/x-pack/filebeat/modules.d/cyberarkpas.yml.disabled +++ b/x-pack/filebeat/modules.d/cyberarkpas.yml.disabled @@ -1,5 +1,5 @@ # Module: cyberarkpas -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-cyberarkpas.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-cyberarkpas.html - module: cyberarkpas audit: diff --git a/x-pack/filebeat/modules.d/cylance.yml.disabled b/x-pack/filebeat/modules.d/cylance.yml.disabled index 164642f0738..48cbb166e82 100644 --- a/x-pack/filebeat/modules.d/cylance.yml.disabled +++ b/x-pack/filebeat/modules.d/cylance.yml.disabled @@ -1,5 +1,5 @@ # Module: cylance -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-cylance.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-cylance.html - module: cylance protect: diff --git a/x-pack/filebeat/modules.d/envoyproxy.yml.disabled b/x-pack/filebeat/modules.d/envoyproxy.yml.disabled index d95316b3c30..b06026cc061 100644 --- a/x-pack/filebeat/modules.d/envoyproxy.yml.disabled +++ b/x-pack/filebeat/modules.d/envoyproxy.yml.disabled @@ -1,5 +1,5 @@ # Module: envoyproxy -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-envoyproxy.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-envoyproxy.html - module: envoyproxy # Fileset for native deployment diff --git a/x-pack/filebeat/modules.d/f5.yml.disabled b/x-pack/filebeat/modules.d/f5.yml.disabled index 4db5209693d..cb9399134fe 100644 --- a/x-pack/filebeat/modules.d/f5.yml.disabled +++ b/x-pack/filebeat/modules.d/f5.yml.disabled @@ -1,5 +1,5 @@ # Module: f5 -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-f5.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-f5.html - module: f5 bigipapm: diff --git a/x-pack/filebeat/modules.d/fortinet.yml.disabled b/x-pack/filebeat/modules.d/fortinet.yml.disabled index e31eb967d73..a07a18bca93 100644 --- a/x-pack/filebeat/modules.d/fortinet.yml.disabled +++ b/x-pack/filebeat/modules.d/fortinet.yml.disabled @@ -1,5 +1,5 @@ # Module: fortinet -# Docs: 
https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-fortinet.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-fortinet.html - module: fortinet firewall: diff --git a/x-pack/filebeat/modules.d/gcp.yml.disabled b/x-pack/filebeat/modules.d/gcp.yml.disabled index b0b5f636b10..601be53f69b 100644 --- a/x-pack/filebeat/modules.d/gcp.yml.disabled +++ b/x-pack/filebeat/modules.d/gcp.yml.disabled @@ -1,5 +1,5 @@ # Module: gcp -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-gcp.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-gcp.html - module: gcp vpcflow: diff --git a/x-pack/filebeat/modules.d/google_workspace.yml.disabled b/x-pack/filebeat/modules.d/google_workspace.yml.disabled index 85142dfcaf0..a079e429f84 100644 --- a/x-pack/filebeat/modules.d/google_workspace.yml.disabled +++ b/x-pack/filebeat/modules.d/google_workspace.yml.disabled @@ -1,5 +1,5 @@ # Module: google_workspace -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-google_workspace.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-google_workspace.html - module: google_workspace saml: diff --git a/x-pack/filebeat/modules.d/ibmmq.yml.disabled b/x-pack/filebeat/modules.d/ibmmq.yml.disabled index 4ad3209a90e..fd19cafb3c9 100644 --- a/x-pack/filebeat/modules.d/ibmmq.yml.disabled +++ b/x-pack/filebeat/modules.d/ibmmq.yml.disabled @@ -1,5 +1,5 @@ # Module: ibmmq -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-ibmmq.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-ibmmq.html - module: ibmmq # All logs diff --git a/x-pack/filebeat/modules.d/imperva.yml.disabled b/x-pack/filebeat/modules.d/imperva.yml.disabled index cd864075960..e6616398303 100644 --- a/x-pack/filebeat/modules.d/imperva.yml.disabled +++ b/x-pack/filebeat/modules.d/imperva.yml.disabled @@ -1,5 +1,5 @@ # Module: imperva -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-imperva.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-imperva.html - module: imperva securesphere: diff --git a/x-pack/filebeat/modules.d/infoblox.yml.disabled b/x-pack/filebeat/modules.d/infoblox.yml.disabled index 24d524d259d..910a896d12a 100644 --- a/x-pack/filebeat/modules.d/infoblox.yml.disabled +++ b/x-pack/filebeat/modules.d/infoblox.yml.disabled @@ -1,5 +1,5 @@ # Module: infoblox -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-infoblox.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-infoblox.html - module: infoblox nios: diff --git a/x-pack/filebeat/modules.d/iptables.yml.disabled b/x-pack/filebeat/modules.d/iptables.yml.disabled index 2d51c67f24e..a4c73b7a04a 100644 --- a/x-pack/filebeat/modules.d/iptables.yml.disabled +++ b/x-pack/filebeat/modules.d/iptables.yml.disabled @@ -1,5 +1,5 @@ # Module: iptables -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-iptables.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-iptables.html - module: iptables log: diff --git a/x-pack/filebeat/modules.d/juniper.yml.disabled b/x-pack/filebeat/modules.d/juniper.yml.disabled index 583f47bb7f7..5fb85afc302 100644 --- a/x-pack/filebeat/modules.d/juniper.yml.disabled +++ b/x-pack/filebeat/modules.d/juniper.yml.disabled @@ -1,5 +1,5 @@ # Module: juniper -# Docs: 
https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-juniper.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-juniper.html - module: juniper junos: diff --git a/x-pack/filebeat/modules.d/microsoft.yml.disabled b/x-pack/filebeat/modules.d/microsoft.yml.disabled index e4af73ad6ed..4c5528f5b76 100644 --- a/x-pack/filebeat/modules.d/microsoft.yml.disabled +++ b/x-pack/filebeat/modules.d/microsoft.yml.disabled @@ -1,5 +1,5 @@ # Module: microsoft -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-microsoft.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-microsoft.html - module: microsoft # ATP configuration diff --git a/x-pack/filebeat/modules.d/misp.yml.disabled b/x-pack/filebeat/modules.d/misp.yml.disabled index 4e405aaac70..28ca6608367 100644 --- a/x-pack/filebeat/modules.d/misp.yml.disabled +++ b/x-pack/filebeat/modules.d/misp.yml.disabled @@ -1,5 +1,5 @@ # Module: misp -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-misp.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-misp.html # Deprecated in 7.14.0: Recommended to migrate to the Threat Intel module. diff --git a/x-pack/filebeat/modules.d/mssql.yml.disabled b/x-pack/filebeat/modules.d/mssql.yml.disabled index c8473c91dd5..ee3f225a941 100644 --- a/x-pack/filebeat/modules.d/mssql.yml.disabled +++ b/x-pack/filebeat/modules.d/mssql.yml.disabled @@ -1,5 +1,5 @@ # Module: mssql -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-mssql.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-mssql.html - module: mssql # Fileset for native deployment diff --git a/x-pack/filebeat/modules.d/mysqlenterprise.yml.disabled b/x-pack/filebeat/modules.d/mysqlenterprise.yml.disabled index 33c1731cd19..50e8860671f 100644 --- a/x-pack/filebeat/modules.d/mysqlenterprise.yml.disabled +++ b/x-pack/filebeat/modules.d/mysqlenterprise.yml.disabled @@ -1,5 +1,5 @@ # Module: mysqlenterprise -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-mysqlenterprise.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-mysqlenterprise.html - module: mysqlenterprise audit: diff --git a/x-pack/filebeat/modules.d/netflow.yml.disabled b/x-pack/filebeat/modules.d/netflow.yml.disabled index 7f365e90b43..b2584b16890 100644 --- a/x-pack/filebeat/modules.d/netflow.yml.disabled +++ b/x-pack/filebeat/modules.d/netflow.yml.disabled @@ -1,5 +1,5 @@ # Module: netflow -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-netflow.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-netflow.html - module: netflow log: diff --git a/x-pack/filebeat/modules.d/netscout.yml.disabled b/x-pack/filebeat/modules.d/netscout.yml.disabled index c6d5520629b..6a0e4c0dce6 100644 --- a/x-pack/filebeat/modules.d/netscout.yml.disabled +++ b/x-pack/filebeat/modules.d/netscout.yml.disabled @@ -1,5 +1,5 @@ # Module: netscout -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-netscout.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-netscout.html - module: netscout sightline: diff --git a/x-pack/filebeat/modules.d/o365.yml.disabled b/x-pack/filebeat/modules.d/o365.yml.disabled index ab61528d6f9..99724949b39 100644 --- a/x-pack/filebeat/modules.d/o365.yml.disabled +++ b/x-pack/filebeat/modules.d/o365.yml.disabled @@ -1,5 
+1,5 @@ # Module: o365 -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-o365.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-o365.html - module: o365 audit: diff --git a/x-pack/filebeat/modules.d/okta.yml.disabled b/x-pack/filebeat/modules.d/okta.yml.disabled index 062856ce4e4..13706b240d2 100644 --- a/x-pack/filebeat/modules.d/okta.yml.disabled +++ b/x-pack/filebeat/modules.d/okta.yml.disabled @@ -1,5 +1,5 @@ # Module: okta -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-okta.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-okta.html - module: okta system: diff --git a/x-pack/filebeat/modules.d/oracle.yml.disabled b/x-pack/filebeat/modules.d/oracle.yml.disabled index aa24b1f6755..c74c5f889f8 100644 --- a/x-pack/filebeat/modules.d/oracle.yml.disabled +++ b/x-pack/filebeat/modules.d/oracle.yml.disabled @@ -1,5 +1,5 @@ # Module: oracle -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-oracle.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-oracle.html - module: oracle database_audit: diff --git a/x-pack/filebeat/modules.d/panw.yml.disabled b/x-pack/filebeat/modules.d/panw.yml.disabled index 1a630f8fb4e..93b9a683603 100644 --- a/x-pack/filebeat/modules.d/panw.yml.disabled +++ b/x-pack/filebeat/modules.d/panw.yml.disabled @@ -1,5 +1,5 @@ # Module: panw -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-panw.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-panw.html - module: panw panos: diff --git a/x-pack/filebeat/modules.d/proofpoint.yml.disabled b/x-pack/filebeat/modules.d/proofpoint.yml.disabled index 34b31277086..2c5dfec92e5 100644 --- a/x-pack/filebeat/modules.d/proofpoint.yml.disabled +++ b/x-pack/filebeat/modules.d/proofpoint.yml.disabled @@ -1,5 +1,5 @@ # Module: proofpoint -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-proofpoint.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-proofpoint.html - module: proofpoint emailsecurity: diff --git a/x-pack/filebeat/modules.d/rabbitmq.yml.disabled b/x-pack/filebeat/modules.d/rabbitmq.yml.disabled index 437cf9a5721..2b2171f86d0 100644 --- a/x-pack/filebeat/modules.d/rabbitmq.yml.disabled +++ b/x-pack/filebeat/modules.d/rabbitmq.yml.disabled @@ -1,5 +1,5 @@ # Module: rabbitmq -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-rabbitmq.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-rabbitmq.html - module: rabbitmq # All logs diff --git a/x-pack/filebeat/modules.d/radware.yml.disabled b/x-pack/filebeat/modules.d/radware.yml.disabled index 553d8459127..fe39a7b805e 100644 --- a/x-pack/filebeat/modules.d/radware.yml.disabled +++ b/x-pack/filebeat/modules.d/radware.yml.disabled @@ -1,5 +1,5 @@ # Module: radware -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-radware.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-radware.html - module: radware defensepro: diff --git a/x-pack/filebeat/modules.d/salesforce.yml.disabled b/x-pack/filebeat/modules.d/salesforce.yml.disabled index 93d04365a86..8535b30f006 100644 --- a/x-pack/filebeat/modules.d/salesforce.yml.disabled +++ b/x-pack/filebeat/modules.d/salesforce.yml.disabled @@ -1,5 +1,5 @@ # Module: salesforce -# Docs: 
https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-salesforce.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-salesforce.html - module: salesforce diff --git a/x-pack/filebeat/modules.d/snort.yml.disabled b/x-pack/filebeat/modules.d/snort.yml.disabled index 89d25c4b556..d8befbb7d7c 100644 --- a/x-pack/filebeat/modules.d/snort.yml.disabled +++ b/x-pack/filebeat/modules.d/snort.yml.disabled @@ -1,5 +1,5 @@ # Module: snort -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-snort.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-snort.html - module: snort log: diff --git a/x-pack/filebeat/modules.d/snyk.yml.disabled b/x-pack/filebeat/modules.d/snyk.yml.disabled index f92cf1d71f0..ab6b379f389 100644 --- a/x-pack/filebeat/modules.d/snyk.yml.disabled +++ b/x-pack/filebeat/modules.d/snyk.yml.disabled @@ -1,5 +1,5 @@ # Module: snyk -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-snyk.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-snyk.html - module: snyk audit: diff --git a/x-pack/filebeat/modules.d/sonicwall.yml.disabled b/x-pack/filebeat/modules.d/sonicwall.yml.disabled index f267d355b37..cf0706bdd81 100644 --- a/x-pack/filebeat/modules.d/sonicwall.yml.disabled +++ b/x-pack/filebeat/modules.d/sonicwall.yml.disabled @@ -1,5 +1,5 @@ # Module: sonicwall -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-sonicwall.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-sonicwall.html - module: sonicwall firewall: diff --git a/x-pack/filebeat/modules.d/sophos.yml.disabled b/x-pack/filebeat/modules.d/sophos.yml.disabled index e875354ad62..42aa513de7e 100644 --- a/x-pack/filebeat/modules.d/sophos.yml.disabled +++ b/x-pack/filebeat/modules.d/sophos.yml.disabled @@ -1,5 +1,5 @@ # Module: sophos -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-sophos.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-sophos.html - module: sophos xg: diff --git a/x-pack/filebeat/modules.d/squid.yml.disabled b/x-pack/filebeat/modules.d/squid.yml.disabled index 81d5f6e0af0..bc34fdcb5a6 100644 --- a/x-pack/filebeat/modules.d/squid.yml.disabled +++ b/x-pack/filebeat/modules.d/squid.yml.disabled @@ -1,5 +1,5 @@ # Module: squid -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-squid.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-squid.html - module: squid log: diff --git a/x-pack/filebeat/modules.d/suricata.yml.disabled b/x-pack/filebeat/modules.d/suricata.yml.disabled index 98e905fff23..14b1855a058 100644 --- a/x-pack/filebeat/modules.d/suricata.yml.disabled +++ b/x-pack/filebeat/modules.d/suricata.yml.disabled @@ -1,5 +1,5 @@ # Module: suricata -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-suricata.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-suricata.html - module: suricata # All logs diff --git a/x-pack/filebeat/modules.d/threatintel.yml.disabled b/x-pack/filebeat/modules.d/threatintel.yml.disabled index 717de295f33..d5a0365f40c 100644 --- a/x-pack/filebeat/modules.d/threatintel.yml.disabled +++ b/x-pack/filebeat/modules.d/threatintel.yml.disabled @@ -1,5 +1,5 @@ # Module: threatintel -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-threatintel.html +# Docs: 
https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-threatintel.html - module: threatintel abuseurl: diff --git a/x-pack/filebeat/modules.d/tomcat.yml.disabled b/x-pack/filebeat/modules.d/tomcat.yml.disabled index dc7a8d7eadd..1fda24706e3 100644 --- a/x-pack/filebeat/modules.d/tomcat.yml.disabled +++ b/x-pack/filebeat/modules.d/tomcat.yml.disabled @@ -1,5 +1,5 @@ # Module: tomcat -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-tomcat.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-tomcat.html - module: tomcat log: diff --git a/x-pack/filebeat/modules.d/zeek.yml.disabled b/x-pack/filebeat/modules.d/zeek.yml.disabled index 2ceeeea911d..4017a6e3997 100644 --- a/x-pack/filebeat/modules.d/zeek.yml.disabled +++ b/x-pack/filebeat/modules.d/zeek.yml.disabled @@ -1,5 +1,5 @@ # Module: zeek -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-zeek.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-zeek.html - module: zeek capture_loss: diff --git a/x-pack/filebeat/modules.d/zookeeper.yml.disabled b/x-pack/filebeat/modules.d/zookeeper.yml.disabled index f632c0de9e7..a2cb2977935 100644 --- a/x-pack/filebeat/modules.d/zookeeper.yml.disabled +++ b/x-pack/filebeat/modules.d/zookeeper.yml.disabled @@ -1,5 +1,5 @@ # Module: zookeeper -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-zookeeper.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-zookeeper.html - module: zookeeper # All logs diff --git a/x-pack/filebeat/modules.d/zoom.yml.disabled b/x-pack/filebeat/modules.d/zoom.yml.disabled index b7a5bc35a00..8fb6dffcaff 100644 --- a/x-pack/filebeat/modules.d/zoom.yml.disabled +++ b/x-pack/filebeat/modules.d/zoom.yml.disabled @@ -1,5 +1,5 @@ # Module: zoom -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-zoom.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-zoom.html - module: zoom webhook: diff --git a/x-pack/filebeat/modules.d/zscaler.yml.disabled b/x-pack/filebeat/modules.d/zscaler.yml.disabled index 732a033073b..8ca0cea079c 100644 --- a/x-pack/filebeat/modules.d/zscaler.yml.disabled +++ b/x-pack/filebeat/modules.d/zscaler.yml.disabled @@ -1,5 +1,5 @@ # Module: zscaler -# Docs: https://www.elastic.co/guide/en/beats/filebeat/master/filebeat-module-zscaler.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/main/filebeat-module-zscaler.html - module: zscaler zia: diff --git a/x-pack/filebeat/processors/decode_cef/cef/cef_test.go b/x-pack/filebeat/processors/decode_cef/cef/cef_test.go index 297642b559f..cc4a4ed786c 100644 --- a/x-pack/filebeat/processors/decode_cef/cef/cef_test.go +++ b/x-pack/filebeat/processors/decode_cef/cef/cef_test.go @@ -20,7 +20,7 @@ import ( var generateCorpus = flag.Bool("corpus", false, "generate fuzz corpus from test cases") const ( - standardMessage = `CEF:26|security|threatmanager|1.0|100|trojan successfully stopped|10|src=10.0.0.192 dst=12.121.122.82 spt=1232 eventId=1` + standardMessage = `CEF:26|security|threatmanager|1.0|100|trojan successfully stopped|10|src=10.0.0.192 dst=12.121.122.82 spt=1232 eventId=1 in=4294967296 out=4294967296` headerOnly = `CEF:26|security|threatmanager|1.0|100|trojan successfully stopped|10|` @@ -124,6 +124,8 @@ func TestEventUnpack(t *testing.T) { "dst": IPField("12.121.122.82"), "spt": IntegerField(1232), "eventId": LongField(1), + "in": LongField(4294967296), + "out": 
LongField(4294967296), }, e.Extensions) }) @@ -449,6 +451,8 @@ func TestEventUnpackWithFullExtensionNames(t *testing.T) { "destinationAddress": IPField("12.121.122.82"), "sourcePort": IntegerField(1232), "eventId": LongField(1), + "bytesIn": LongField(4294967296), + "bytesOut": LongField(4294967296), }, e.Extensions) } diff --git a/x-pack/filebeat/processors/decode_cef/cef/keys.go b/x-pack/filebeat/processors/decode_cef/cef/keys.go index 7ea0f00d446..43854a304c0 100644 --- a/x-pack/filebeat/processors/decode_cef/cef/keys.go +++ b/x-pack/filebeat/processors/decode_cef/cef/keys.go @@ -90,11 +90,11 @@ var extensionMapping = map[string]mappedField{ }, "in": { Target: "bytesIn", - Type: IntegerType, + Type: LongType, }, "out": { Target: "bytesOut", - Type: IntegerType, + Type: LongType, }, "customerExternalID": { Target: "customerExternalID", diff --git a/x-pack/filebeat/tests/integration/framework_test.go b/x-pack/filebeat/tests/integration/framework_test.go deleted file mode 100644 index 1d572c6cc00..00000000000 --- a/x-pack/filebeat/tests/integration/framework_test.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//go:build integration - -package integration - -import ( - "bufio" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -type BeatProc struct { - Binary string - Args []string - Cmd *exec.Cmd - t *testing.T - tempDir string -} - -// NewBeat createa a new Beat process from the system tests binary. -// It sets some required options like the home path, logging, etc. -// `tempDir` will be used as home and logs directory for the Beat -// `args` will be passed as CLI arguments to the Beat -func NewBeat(t *testing.T, binary string, tempDir string, args ...string) BeatProc { - p := BeatProc{ - t: t, - Binary: binary, - Args: append([]string{ - "--systemTest", - "--path.home", tempDir, - "--path.logs", tempDir, - "-E", "logging.to_files=true", - "-E", "logging.files.rotateeverybytes=104857600", // About 100MB - }, args...), - tempDir: tempDir, - } - return p -} - -// Start starts the Beat process -func (b *BeatProc) Start() { - t := b.t - fullPath, err := filepath.Abs(b.Binary) - if err != nil { - t.Fatalf("could not get full path from %q, err: %s", b.Binary, err) - } - b.Cmd = exec.Command(fullPath, b.Args...) - - if err := b.Cmd.Start(); err != nil { - t.Fatalf("could not start process: %s", err) - } - t.Cleanup(func() { - pid := b.Cmd.Process.Pid - if err := b.Cmd.Process.Kill(); err != nil { - t.Fatalf("could not stop process with PID: %d, err: %s", pid, err) - } - }) -} - -// LogContains looks for `s` as a substring of every log line, -// it will open the log file on every call, read it until EOF, -// then close it. 
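The decode_cef hunks above widen the `in`/`out` extension keys (mapped to `bytesIn`/`bytesOut`) from `IntegerType` to `LongType`, and the test message gains `in=4294967296 out=4294967296` to exercise it: 2^32 does not fit in a signed 32-bit integer. A minimal standalone sketch of the overflow the type change avoids:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	v := "4294967296" // 2^32, the value used in the updated CEF test message

	// Parsing as a 32-bit integer fails with strconv.ErrRange,
	// which is what the old IntegerType mapping amounted to.
	if _, err := strconv.ParseInt(v, 10, 32); err != nil {
		fmt.Println("32-bit parse:", err) // ... value out of range
	}

	// Parsing as a 64-bit integer succeeds, hence the remap of
	// "in"/"out" from IntegerType to LongType.
	n, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println("64-bit parse:", n) // 4294967296
}
```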
-func (b *BeatProc) LogContains(s string) bool { - t := b.t - logFile := b.openLogFile() - defer func() { - if err := logFile.Close(); err != nil { - // That's not quite a test error, but it can impact - // next executions of LogContains, so treat it as an error - t.Errorf("could not close log file: %s", err) - } - }() - - r := bufio.NewReader(logFile) - for { - line, err := r.ReadString('\n') - if err != nil { - if err != io.EOF { - t.Fatalf("error reading log file '%s': %s", logFile.Name(), err) - } - break - } - if strings.Contains(line, s) { - return true - } - } - - return false -} - -// openLogFile opens the log file for reading and returns it. -// It also registers a cleanup function to close the file -// when the test ends. -func (b *BeatProc) openLogFile() *os.File { - t := b.t - glob := fmt.Sprintf("%s-*.ndjson", filepath.Join(b.tempDir, "filebeat")) - files, err := filepath.Glob(glob) - if err != nil { - t.Fatalf("could not expand log file glob: %s", err) - } - - require.Eventually(t, func() bool { - files, err = filepath.Glob(glob) - if err != nil { - t.Fatalf("could not expand log file glob: %s", err) - } - return len(files) == 1 - }, 5*time.Second, 100*time.Millisecond, - "waiting for log file matching glob '%s' to be created", glob) - - // On a normal operation there must be a single log, if there are more - // than one, then there is an issue and the Beat is logging too much, - // which is enough to stop the test - if len(files) != 1 { - t.Fatalf("there must be only one log file for %s, found: %d", - glob, len(files)) - } - - f, err := os.Open(files[0]) - if err != nil { - t.Fatalf("could not open log file '%s': %s", files[0], err) - } - - return f -} - -// createTempDir creates a temporary directory that will be -// removed after the tests passes. -// -// If the test fails, the temporary directory is not removed. -// -// If the tests are run with -v, the temporary directory will -// be logged. -func createTempDir(t *testing.T) string { - tempDir, err := filepath.Abs(filepath.Join("../../build/integration-tests/", - fmt.Sprintf("%s-%d", t.Name(), time.Now().Unix()))) - if err != nil { - t.Fatal(err) - } - - if err := os.MkdirAll(tempDir, 0766); err != nil { - t.Fatalf("cannot create tmp dir: %s, msg: %s", err, err.Error()) - } - t.Logf("Temporary directory: %s", tempDir) - - cleanup := func() { - if !t.Failed() { - if err := os.RemoveAll(tempDir); err != nil { - t.Errorf("could not remove temp dir '%s': %s", tempDir, err) - } - t.Logf("Temporary directory '%s' removed", tempDir) - } - } - t.Cleanup(cleanup) - - return tempDir -} diff --git a/x-pack/filebeat/tests/integration/managerV2_test.go b/x-pack/filebeat/tests/integration/managerV2_test.go index d0d0eea6e30..5e3111a0e09 100644 --- a/x-pack/filebeat/tests/integration/managerV2_test.go +++ b/x-pack/filebeat/tests/integration/managerV2_test.go @@ -7,17 +7,15 @@ package integration import ( - "context" "fmt" - "net/http" "os" "path/filepath" "testing" "time" "github.com/stretchr/testify/require" - "google.golang.org/protobuf/types/known/structpb" + "github.com/elastic/beats/v7/libbeat/tests/integration" "github.com/elastic/beats/v7/x-pack/libbeat/management" "github.com/elastic/elastic-agent-client/v7/pkg/client/mock" "github.com/elastic/elastic-agent-client/v7/pkg/proto" @@ -41,14 +39,15 @@ func TestInputReloadUnderElasticAgent(t *testing.T) { // First things first, ensure ES is running and we can connect to it. 
// If ES is not running, the test will timeout and the only way to know // what caused it is going through Filebeat's logs. - ensureESIsRunning(t) + integration.EnsureESIsRunning(t) - // We create our own temp dir so the files can be persisted - // in case the test fails. This will help debugging issues - // locally and on CI. - tempDir := createTempDir(t) + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) - logFilePath := filepath.Join(tempDir, "flog.log") + logFilePath := filepath.Join(filebeat.TempDir(), "flog.log") generateLogFile(t, logFilePath) var units = [][]*proto.UnitExpected{ { @@ -62,14 +61,15 @@ func TestInputReloadUnderElasticAgent(t *testing.T) { Id: "default", Type: "elasticsearch", Name: "elasticsearch", - Source: requireNewStruct(t, + Source: integration.RequireNewStruct(t, map[string]interface{}{ - "type": "elasticsearch", - "hosts": []interface{}{"http://localhost:9200"}, - "username": "admin", - "password": "testing", - "protocol": "http", - "enabled": true, + "type": "elasticsearch", + "hosts": []interface{}{"http://localhost:9200"}, + "username": "admin", + "password": "testing", + "protocol": "http", + "enabled": true, + "allow_older_versions": true, }), }, }, @@ -86,7 +86,7 @@ func TestInputReloadUnderElasticAgent(t *testing.T) { Streams: []*proto.Stream{ { Id: "log-input-1", - Source: requireNewStruct(t, map[string]interface{}{ + Source: integration.RequireNewStruct(t, map[string]interface{}{ "enabled": true, "type": "log", "paths": []interface{}{logFilePath}, @@ -107,14 +107,15 @@ func TestInputReloadUnderElasticAgent(t *testing.T) { Id: "default", Type: "elasticsearch", Name: "elasticsearch", - Source: requireNewStruct(t, + Source: integration.RequireNewStruct(t, map[string]interface{}{ - "type": "elasticsearch", - "hosts": []interface{}{"http://localhost:9200"}, - "username": "admin", - "password": "testing", - "protocol": "http", - "enabled": true, + "type": "elasticsearch", + "hosts": []interface{}{"http://localhost:9200"}, + "username": "admin", + "password": "testing", + "protocol": "http", + "enabled": true, + "allow_older_versions": true, }), }, }, @@ -131,7 +132,7 @@ func TestInputReloadUnderElasticAgent(t *testing.T) { Streams: []*proto.Stream{ { Id: "log-input-2", - Source: requireNewStruct(t, map[string]interface{}{ + Source: integration.RequireNewStruct(t, map[string]interface{}{ "enabled": true, "type": "log", "paths": []interface{}{logFilePath}, @@ -198,16 +199,11 @@ func TestInputReloadUnderElasticAgent(t *testing.T) { require.NoError(t, server.Start()) t.Cleanup(server.Stop) - filebeat := NewBeat( - t, - "../../filebeat.test", - tempDir, + filebeat.Start( "-E", fmt.Sprintf(`management.insecure_grpc_url_for_testing="localhost:%d"`, server.Port), "-E", "management.enabled=true", ) - filebeat.Start() - // waitDeadlineOr5Mins looks at the test deadline // and returns a reasonable value of waiting for a // condition to be met. The possible values are: @@ -262,11 +258,14 @@ func TestFailedOutputReportsUnhealthy(t *testing.T) { // First things first, ensure ES is running and we can connect to it. // If ES is not running, the test will timeout and the only way to know // what caused it is going through Filebeat's logs. 
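The same refactor is applied to the next test below: the per-Beat helpers deleted from framework_test.go are replaced by the shared `libbeat/tests/integration` package. Condensed into one place, the framework usage visible in these hunks looks roughly like the following sketch (signatures are inferred only from the calls shown in this diff; the `-E` flag value is illustrative):

```go
//go:build integration

package integration

import (
	"testing"

	"github.com/elastic/beats/v7/libbeat/tests/integration"
)

func TestSharedFrameworkSketch(t *testing.T) {
	// Fail fast if Elasticsearch is not reachable.
	integration.EnsureESIsRunning(t)

	// The Beat name now comes first; the framework manages the
	// temporary home directory itself instead of createTempDir.
	filebeat := integration.NewBeat(t, "filebeat", "../../filebeat.test")

	// TempDir exposes the framework-managed home/logs directory.
	t.Logf("home: %s", filebeat.TempDir())

	// Start receives the extra CLI arguments directly.
	filebeat.Start("-E", "management.enabled=false")
}
```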
- ensureESIsRunning(t) + integration.EnsureESIsRunning(t) + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) - tempDir := createTempDir(t) finalStateReached := false - var units = []*proto.UnitExpected{ { Id: "output-unit-borken", @@ -278,7 +277,7 @@ func TestFailedOutputReportsUnhealthy(t *testing.T) { Id: "default", Type: "logstash", Name: "logstash", - Source: requireNewStruct(t, + Source: integration.RequireNewStruct(t, map[string]interface{}{ "type": "logstash", "invalid": "configuration", @@ -300,7 +299,7 @@ func TestFailedOutputReportsUnhealthy(t *testing.T) { Streams: []*proto.Stream{ { Id: "log-input", - Source: requireNewStruct(t, map[string]interface{}{ + Source: integration.RequireNewStruct(t, map[string]interface{}{ "enabled": true, "type": "log", "paths": "/tmp/foo", @@ -332,16 +331,11 @@ func TestFailedOutputReportsUnhealthy(t *testing.T) { require.NoError(t, server.Start()) - filebeat := NewBeat( - t, - "../../filebeat.test", - tempDir, + filebeat.Start( "-E", fmt.Sprintf(`management.insecure_grpc_url_for_testing="localhost:%d"`, server.Port), "-E", "management.enabled=true", ) - filebeat.Start() - require.Eventually(t, func() bool { return finalStateReached }, 30*time.Second, 100*time.Millisecond, "Output unit did not report unhealthy") @@ -349,12 +343,160 @@ func TestFailedOutputReportsUnhealthy(t *testing.T) { t.Cleanup(server.Stop) } -func requireNewStruct(t *testing.T, v map[string]interface{}) *structpb.Struct { - str, err := structpb.NewStruct(v) - if err != nil { - require.NoError(t, err) +func TestRecoverFromInvalidOutputConfiguration(t *testing.T) { + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) + + // Having the log file enables the inputs to start, while it is not + // strictly necessary for testing output issues, it allows for the + // input to start which creates a more realistic test case and + // can help uncover other issues in the startup/shutdown process. 
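The local `requireNewStruct` helper deleted here now lives in the shared package as `integration.RequireNewStruct`. Its job, as visible in the removed code, is to turn a plain map into the `*structpb.Struct` the mock Elastic Agent protocol expects, failing the test on conversion errors. A sketch mirroring the deleted helper:

```go
package integration

import (
	"testing"

	"github.com/stretchr/testify/require"
	"google.golang.org/protobuf/types/known/structpb"
)

// RequireNewStructSketch mirrors the helper this diff deletes from
// managerV2_test.go and now imports from libbeat/tests/integration.
func RequireNewStructSketch(t *testing.T, v map[string]interface{}) *structpb.Struct {
	t.Helper()
	str, err := structpb.NewStruct(v)
	require.NoError(t, err, "could not convert map to structpb.Struct")
	return str
}
```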
+ logFilePath := filepath.Join(filebeat.TempDir(), "flog.log") + generateLogFile(t, logFilePath) + + logLevel := proto.UnitLogLevel_INFO + filestreamInputHealthy := proto.UnitExpected{ + Id: "input-unit-healthy", + Type: proto.UnitType_INPUT, + ConfigStateIdx: 1, + State: proto.State_HEALTHY, + LogLevel: logLevel, + Config: &proto.UnitExpectedConfig{ + Id: "filestream-input", + Type: "filestream", + Name: "filestream-input-healty", + Streams: []*proto.Stream{ + { + Id: "filestream-input-id", + Source: integration.RequireNewStruct(t, map[string]interface{}{ + "id": "filestream-stream-input-id", + "enabled": true, + "type": "filestream", + "paths": logFilePath, + }), + }, + }, + }, + } + + filestreamInputStarting := proto.UnitExpected{ + Id: "input-unit-2", + Type: proto.UnitType_INPUT, + ConfigStateIdx: 1, + State: proto.State_STARTING, + LogLevel: logLevel, + Config: &proto.UnitExpectedConfig{ + Id: "filestream-input", + Type: "filestream", + Name: "filestream-input-starting", + Streams: []*proto.Stream{ + { + Id: "filestream-input-id", + Source: integration.RequireNewStruct(t, map[string]interface{}{ + "id": "filestream-stream-input-id", + "enabled": true, + "type": "filestream", + "paths": logFilePath, + }), + }, + }, + }, + } + + healthyOutput := proto.UnitExpected{ + Id: "output-unit", + Type: proto.UnitType_OUTPUT, + ConfigStateIdx: 1, + State: proto.State_HEALTHY, + LogLevel: logLevel, + Config: &proto.UnitExpectedConfig{ + Id: "default", + Type: "elasticsearch", + Name: "elasticsearch", + Source: integration.RequireNewStruct(t, + map[string]interface{}{ + "type": "elasticsearch", + "hosts": []interface{}{"http://localhost:9200"}, + "username": "admin", + "password": "testing", + "protocol": "http", + "enabled": true, + }), + }, + } + + brokenOutput := proto.UnitExpected{ + Id: "output-unit-borken", + Type: proto.UnitType_OUTPUT, + ConfigStateIdx: 1, + State: proto.State_FAILED, + LogLevel: logLevel, + Config: &proto.UnitExpectedConfig{ + Id: "default", + Type: "logstash", + Name: "logstash", + Source: integration.RequireNewStruct(t, + map[string]interface{}{ + "type": "logstash", + "invalid": "configuration", + }), + }, + } + + // Those are the 'states' Filebeat will go through. + // After each state is reached the mockServer will + // send the next. + protoUnits := [][]*proto.UnitExpected{ + { + &healthyOutput, + &filestreamInputHealthy, + }, + { + &brokenOutput, + &filestreamInputStarting, + }, + { + &healthyOutput, + &filestreamInputHealthy, + }, + {}, // An empty one makes the Beat exit + } + + // We use `success` to signal the test has ended successfully + // if `success` is never closed, then the test will fail with a timeout. 
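Before the hunk continues, here is the wait-with-timeout pattern the following lines implement, in isolation (a standalone sketch, not code from the PR): completion is signaled by closing a channel, and the test either proceeds or fails after a deadline.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	success := make(chan struct{})

	go func() {
		// Stand-in for onObserved reaching the last protoUnits entry.
		time.Sleep(100 * time.Millisecond)
		close(success)
	}()

	select {
	case <-success:
		fmt.Println("final state reached")
	case <-time.After(60 * time.Second):
		fmt.Println("timed out waiting for the Beat to recover")
	}
}
```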
+ success := make(chan struct{}) + // The test is successful when we reach the last element of `protoUnits` + onObserved := func(observed *proto.CheckinObserved, protoUnitsIdx int) { + if protoUnitsIdx == len(protoUnits)-1 { + close(success) + } + } + + server := integration.NewMockServer( + protoUnits, + []uint64{0, 0, 0, 0}, + []*proto.Features{nil, nil, nil, nil}, + onObserved, + 100*time.Millisecond, + ) + require.NoError(t, server.Start(), "could not start the mock Elastic-Agent server") + defer server.Stop() + + filebeat.RestartOnBeatOnExit = true + filebeat.Start( + "-E", fmt.Sprintf(`management.insecure_grpc_url_for_testing="localhost:%d"`, server.Port), + "-E", "management.enabled=true", + "-E", "management.restart_on_output_change=true", + ) + + select { + case <-success: + case <-time.After(60 * time.Second): + t.Fatal("Output did not recover from a invalid configuration after 60s of waiting") } - return str } // generateLogFile generates a log file by appending the current @@ -400,36 +542,3 @@ func generateLogFile(t *testing.T, fullPath string) { } }() } - -func ensureESIsRunning(t *testing.T) { - t.Helper() - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(500*time.Second)) - defer cancel() - req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://localhost:9200", nil) - if err != nil { - t.Fatalf("cannot create request to ensure ES is running: %s", err) - } - - user := os.Getenv("ES_USER") - if user == "" { - user = "admin" - } - - pass := os.Getenv("ES_PASS") - if pass == "" { - pass = "testing" - } - - req.SetBasicAuth(user, pass) - - resp, err := http.DefaultClient.Do(req) - if err != nil { - // If you're reading this message, you probably forgot to start ES - // run `mage compose:Up` from Filebeat's folder to start all - // containers required for integration tests - t.Fatalf("cannot execute HTTP request to ES: %s", err) - } - if resp.StatusCode != http.StatusOK { - t.Errorf("unexpected HTTP status: %d, expecting 200 - OK", resp.StatusCode) - } -} diff --git a/x-pack/functionbeat/Dockerfile b/x-pack/functionbeat/Dockerfile index 3d0953e88e1..61e13baa293 100644 --- a/x-pack/functionbeat/Dockerfile +++ b/x-pack/functionbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19.10 +FROM golang:1.19.12 RUN \ apt-get update \ @@ -11,7 +11,7 @@ RUN \ && rm -rf /var/lib/apt/lists/* # Use a virtualenv to avoid the PEP668 "externally managed environment" error caused by conflicts -# with the system Python installation. golang:1.19.10 uses Debian 12 which now enforces PEP668. +# with the system Python installation. golang:1.20.6 uses Debian 12 which now enforces PEP668. ENV VIRTUAL_ENV=/opt/venv RUN python3 -m venv $VIRTUAL_ENV ENV PATH="$VIRTUAL_ENV/bin:$PATH" diff --git a/x-pack/functionbeat/_meta/config/beat.reference.yml.tmpl b/x-pack/functionbeat/_meta/config/beat.reference.yml.tmpl index c5b5b0524fa..2f280df4c3a 100644 --- a/x-pack/functionbeat/_meta/config/beat.reference.yml.tmpl +++ b/x-pack/functionbeat/_meta/config/beat.reference.yml.tmpl @@ -8,7 +8,7 @@ # https://www.elastic.co/guide/en/beats/functionbeat/index.html {{header "Provider"}} -# Configure functions to run on AWS Lambda, currently we assume that the credentials +# Configure functions to run on AWS Lambda, currently, we assume that the credentials # are present in the environment to correctly create the function when using the CLI. # # Configure which S3 endpoint should we use. 
@@ -29,13 +29,13 @@ functionbeat.provider.aws.deploy_bucket: "functionbeat-deploy" #functionbeat.provider.aws.shared_credential_file: /etc/functionbeat/aws_credentials functionbeat.provider.aws.functions: - # Define the list of function availables, each function required to have a unique name. + # Define the list of functions available, each function is required to have a unique name. # Create a function that accepts events coming from cloudwatchlogs. - name: cloudwatch enabled: false type: cloudwatch_logs - # Description of the method to help identify them when you run multiples functions. + # Description of the method to help identify them when you run multiple functions. description: "lambda function for cloudwatch logs" # Concurrency, is the reserved number of instances for that function. @@ -59,7 +59,7 @@ functionbeat.provider.aws.functions: # security_group_ids: [] # subnet_ids: [] - # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue. + # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue. #dead_letter_config.target_arn: # Tags are key-value pairs attached to the function. @@ -90,7 +90,7 @@ functionbeat.provider.aws.functions: enabled: false type: sqs - # Description of the method to help identify them when you run multiples functions. + # Description of the method to help identify them when you run multiple functions. description: "lambda function for SQS events" # Concurrency, is the reserved number of instances for that function. @@ -114,7 +114,7 @@ functionbeat.provider.aws.functions: # security_group_ids: [] # subnet_ids: [] - # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue. + # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue. #dead_letter_config.target_arn: # Tags are key-value pairs attached to the function. @@ -150,7 +150,7 @@ functionbeat.provider.aws.functions: enabled: false type: kinesis - # Description of the method to help identify them when you run multiples functions. + # Description of the method to help identify them when you run multiple functions. description: "lambda function for Kinesis events" # Concurrency, is the reserved number of instances for that function. @@ -174,7 +174,7 @@ functionbeat.provider.aws.functions: # security_group_ids: [] # subnet_ids: [] - # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue. + # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue. #dead_letter_config.target_arn: # Tags are key-value pairs attached to the function. @@ -230,7 +230,7 @@ functionbeat.provider.aws.functions: enabled: false type: cloudwatch_logs_kinesis - # Description of the method to help identify them when you run multiples functions. + # Description of the method to help identify them when you run multiple functions. description: "lambda function for Cloudwatch logs in Kinesis events" # Set base64_encoded if your data is base64 encoded. @@ -249,7 +249,7 @@ functionbeat.provider.aws.functions: # There is a hard limit of 3008MiB for each function. Default is 128MiB. #memory_size: 128MiB - # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue. + # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue. #dead_letter_config.target_arn: # Tags are key-value pairs attached to the function. 
diff --git a/x-pack/functionbeat/_meta/config/beat.yml.tmpl b/x-pack/functionbeat/_meta/config/beat.yml.tmpl index 48c0e6ae40b..e3872794a27 100644 --- a/x-pack/functionbeat/_meta/config/beat.yml.tmpl +++ b/x-pack/functionbeat/_meta/config/beat.yml.tmpl @@ -9,7 +9,7 @@ # {{header "Provider"}} -# Configure functions to run on AWS Lambda, currently we assume that the credentials +# Configure functions to run on AWS Lambda, currently, we assume that the credentials # are present in the environment to correctly create the function when using the CLI. # # Configure which S3 endpoint should we use. @@ -18,13 +18,13 @@ functionbeat.provider.aws.endpoint: "s3.amazonaws.com" functionbeat.provider.aws.deploy_bucket: "functionbeat-deploy" functionbeat.provider.aws.functions: - # Define the list of function availables, each function required to have a unique name. + # Define the list of functions available, each function is required to have a unique name. # Create a function that accepts events coming from cloudwatchlogs. - name: cloudwatch enabled: false type: cloudwatch_logs - # Description of the method to help identify them when you run multiples functions. + # Description of the method to help identify them when you run multiple functions. description: "lambda function for cloudwatch logs" # Concurrency, is the reserved number of instances for that function. @@ -37,7 +37,7 @@ functionbeat.provider.aws.functions: # There is a hard limit of 3008MiB for each function. Default is 128MiB. #memory_size: 128MiB - # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue. + # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue. #dead_letter_config.target_arn: # Execution role of the function. @@ -54,7 +54,7 @@ functionbeat.provider.aws.functions: #fields: # env: staging - # List of cloudwatch log group registered to that function. + # List of cloudwatch log groups registered to that function. triggers: - log_group_name: /aws/lambda/functionbeat-cloudwatch_logs filter_pattern: mylog_ @@ -69,7 +69,7 @@ functionbeat.provider.aws.functions: enabled: false type: sqs - # Description of the method to help identify them when you run multiples functions. + # Description of the method to help identify them when you run multiple functions. description: "lambda function for SQS events" # Concurrency, is the reserved number of instances for that function. @@ -82,7 +82,7 @@ functionbeat.provider.aws.functions: # There is a hard limit of 3008MiB for each function. Default is 128MiB. #memory_size: 128MiB - # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue. + # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue. #dead_letter_config.target_arn: # Execution role of the function. @@ -119,7 +119,7 @@ functionbeat.provider.aws.functions: enabled: false type: kinesis - # Description of the method to help identify them when you run multiples functions. + # Description of the method to help identify them when you run multiple functions. description: "lambda function for Kinesis events" # Concurrency, is the reserved number of instances for that function. @@ -132,7 +132,7 @@ functionbeat.provider.aws.functions: # There is a hard limit of 3008MiB for each function. Default is 128MiB. #memory_size: 128MiB - # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue. + # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue. 
#dead_letter_config.target_arn: # Execution role of the function. @@ -189,7 +189,7 @@ functionbeat.provider.aws.functions: enabled: false type: cloudwatch_logs_kinesis - # Description of the method to help identify them when you run multiples functions. + # Description of the method to help identify them when you run multiple functions. description: "lambda function for Cloudwatch logs in Kinesis events" # Set base64_encoded if your data is base64 encoded. @@ -208,7 +208,7 @@ functionbeat.provider.aws.functions: # There is a hard limit of 3008MiB for each function. Default is 128MiB. #memory_size: 128MiB - # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue. + # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue. #dead_letter_config.target_arn: # Execution role of the function. diff --git a/x-pack/functionbeat/docs/troubleshooting.asciidoc b/x-pack/functionbeat/docs/troubleshooting.asciidoc index b18a37a2fe1..777f24edd82 100644 --- a/x-pack/functionbeat/docs/troubleshooting.asciidoc +++ b/x-pack/functionbeat/docs/troubleshooting.asciidoc @@ -9,6 +9,7 @@ following tips: * <> * <> +* <> * <> //sets block macro for getting-help.asciidoc included in next section @@ -29,5 +30,14 @@ include::{libbeat-dir}/getting-help.asciidoc[] include::{libbeat-dir}/debugging.asciidoc[] +//sets block macro for metrics-in-logs.asciidoc included in next section +[id="understand-{beatname_lc}-logs"] +[role="xpack"] +== Understand metrics in {beatname_uc} logs + +++++ +Understand logged metrics +++++ +include::{libbeat-dir}/metrics-in-logs.asciidoc[] diff --git a/x-pack/functionbeat/function/provider/provider.go b/x-pack/functionbeat/function/provider/provider.go index f3b4d56bd8a..6c33eb80b1a 100644 --- a/x-pack/functionbeat/function/provider/provider.go +++ b/x-pack/functionbeat/function/provider/provider.go @@ -8,8 +8,6 @@ import ( "context" "fmt" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/feature" "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/elastic/beats/v7/x-pack/functionbeat/function/core" @@ -51,7 +49,7 @@ type Runnable struct { func (r *Runnable) Run(ctx context.Context, t telemetry.T) error { client, err := r.makeClient(r.config) if err != nil { - return errors.Wrap(err, "could not create a client for the function") + return fmt.Errorf("could not create a client for the function: %w", err) } defer client.Close() return r.function.Run(ctx, client, t) diff --git a/x-pack/functionbeat/function/provider/provider_test.go b/x-pack/functionbeat/function/provider/provider_test.go index e68eb9f57a4..47385cbb029 100644 --- a/x-pack/functionbeat/function/provider/provider_test.go +++ b/x-pack/functionbeat/function/provider/provider_test.go @@ -9,7 +9,6 @@ import ( "errors" "testing" - e "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/elastic/beats/v7/libbeat/beat" @@ -47,7 +46,7 @@ func TestRunnable(t *testing.T) { } errReceived := runnable.Run(context.Background(), telemetry.Ignored()) - assert.Equal(t, err, e.Cause(errReceived)) + assert.Equal(t, "could not create a client for the function: "+err.Error(), errReceived.Error()) }) t.Run("propagate functions errors to the coordinator", func(t *testing.T) { @@ -59,7 +58,7 @@ func TestRunnable(t *testing.T) { } errReceived := runnable.Run(context.Background(), telemetry.Ignored()) - assert.Equal(t, err, e.Cause(errReceived)) + assert.Equal(t, err.Error(), errReceived.Error()) }) t.Run("when there is no error run and exit normaly", 
func(t *testing.T) { diff --git a/x-pack/functionbeat/functionbeat.reference.yml b/x-pack/functionbeat/functionbeat.reference.yml index 31b7928c0f2..7855538f621 100644 --- a/x-pack/functionbeat/functionbeat.reference.yml +++ b/x-pack/functionbeat/functionbeat.reference.yml @@ -8,7 +8,7 @@ # https://www.elastic.co/guide/en/beats/functionbeat/index.html # ================================== Provider ================================== -# Configure functions to run on AWS Lambda, currently we assume that the credentials +# Configure functions to run on AWS Lambda, currently, we assume that the credentials # are present in the environment to correctly create the function when using the CLI. # # Configure which S3 endpoint should we use. @@ -29,13 +29,13 @@ functionbeat.provider.aws.deploy_bucket: "functionbeat-deploy" #functionbeat.provider.aws.shared_credential_file: /etc/functionbeat/aws_credentials functionbeat.provider.aws.functions: - # Define the list of function availables, each function required to have a unique name. + # Define the list of functions available, each function is required to have a unique name. # Create a function that accepts events coming from cloudwatchlogs. - name: cloudwatch enabled: false type: cloudwatch_logs - # Description of the method to help identify them when you run multiples functions. + # Description of the method to help identify them when you run multiple functions. description: "lambda function for cloudwatch logs" # Concurrency, is the reserved number of instances for that function. @@ -59,7 +59,7 @@ functionbeat.provider.aws.functions: # security_group_ids: [] # subnet_ids: [] - # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue. + # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue. #dead_letter_config.target_arn: # Tags are key-value pairs attached to the function. @@ -90,7 +90,7 @@ functionbeat.provider.aws.functions: enabled: false type: sqs - # Description of the method to help identify them when you run multiples functions. + # Description of the method to help identify them when you run multiple functions. description: "lambda function for SQS events" # Concurrency, is the reserved number of instances for that function. @@ -114,7 +114,7 @@ functionbeat.provider.aws.functions: # security_group_ids: [] # subnet_ids: [] - # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue. + # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue. #dead_letter_config.target_arn: # Tags are key-value pairs attached to the function. @@ -150,7 +150,7 @@ functionbeat.provider.aws.functions: enabled: false type: kinesis - # Description of the method to help identify them when you run multiples functions. + # Description of the method to help identify them when you run multiple functions. description: "lambda function for Kinesis events" # Concurrency, is the reserved number of instances for that function. @@ -174,7 +174,7 @@ functionbeat.provider.aws.functions: # security_group_ids: [] # subnet_ids: [] - # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue. + # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue. #dead_letter_config.target_arn: # Tags are key-value pairs attached to the function. 
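Stepping back to the Go changes in provider.go and provider_test.go above: replacing `errors.Wrap` from github.com/pkg/errors with `fmt.Errorf` and the `%w` verb keeps the error chain inspectable through the standard library, which is why the test assertions drop `e.Cause` in favor of comparing wrapped messages. A minimal sketch of the equivalent stdlib idiom:

```go
package main

import (
	"errors"
	"fmt"
)

var errBoom = errors.New("boom")

// makeClient stands in for the function-client constructor in
// provider.go after the migration to fmt.Errorf with %w.
func makeClient() error {
	return fmt.Errorf("could not create a client for the function: %w", errBoom)
}

func main() {
	err := makeClient()

	// errors.Is walks the %w chain, covering what pkg/errors.Cause
	// used to provide.
	fmt.Println(errors.Is(err, errBoom)) // true
	fmt.Println(errors.Unwrap(err))      // boom
}
```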
@@ -230,7 +230,7 @@ functionbeat.provider.aws.functions: enabled: false type: cloudwatch_logs_kinesis - # Description of the method to help identify them when you run multiples functions. + # Description of the method to help identify them when you run multiple functions. description: "lambda function for Cloudwatch logs in Kinesis events" # Set base64_encoded if your data is base64 encoded. @@ -249,7 +249,7 @@ functionbeat.provider.aws.functions: # There is a hard limit of 3008MiB for each function. Default is 128MiB. #memory_size: 128MiB - # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue. + # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue. #dead_letter_config.target_arn: # Tags are key-value pairs attached to the function. @@ -300,10 +300,10 @@ functionbeat.provider.aws.functions: # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. +# If this option is not defined, the hostname is used. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] @@ -315,7 +315,7 @@ functionbeat.provider.aws.functions: # env: staging # If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields +# fields in the output document instead of being grouped under a field # sub-dictionary. Default is false. #fields_under_root: false @@ -327,7 +327,7 @@ functionbeat.provider.aws.functions: #queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs - # bulk_max_size) to the output, the moment the output is ready to server + # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events. #mem: # Max number of events the queue can buffer. @@ -379,7 +379,7 @@ functionbeat.provider.aws.functions: # length of its retry interval each time, up to this maximum. #max_retry_interval: 30s -# Sets the maximum number of CPUs that can be executing simultaneously. The +# Sets the maximum number of CPUs that can be executed simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: @@ -500,7 +500,7 @@ functionbeat.provider.aws.functions: # ignore_missing: false # fail_on_error: true # -# The following example copies the value of message to message_copied +# The following example copies the value of the message to message_copied # #processors: # - copy_fields: @@ -510,7 +510,7 @@ functionbeat.provider.aws.functions: # fail_on_error: true # ignore_missing: false # -# The following example truncates the value of message to 1024 bytes +# The following example truncates the value of the message to 1024 bytes # #processors: # - truncate_fields: @@ -607,7 +607,7 @@ output.elasticsearch: # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. #index: "functionbeat-%{[agent.version]}" - # Optional ingest pipeline. By default no pipeline will be used. + # Optional ingest pipeline. By default, no pipeline will be used. 
#pipeline: "" # Optional HTTP path @@ -934,14 +934,14 @@ output.elasticsearch: # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag or the `setup` command. +# options here or by using the `-setup` CLI flag or the `setup` command. #setup.dashboards.enabled: false # The directory from where to read the dashboards. The default is the `kibana` # folder in the home path. #setup.dashboards.directory: ${path.home}/kibana -# The URL from where to download the dashboards archive. It is used instead of +# The URL from where to download the dashboard archive. It is used instead of # the directory if it has a value. #setup.dashboards.url: @@ -1038,7 +1038,7 @@ setup.template.settings: # Configure index lifecycle management (ILM) to manage the backing indices # of your data streams. -# Enable ILM support. Valid values are true, false. +# Enable ILM support. Valid values are true, or false. #setup.ilm.enabled: true # Set the lifecycle policy name. The default policy name is @@ -1193,25 +1193,25 @@ logging.files: # The name of the files where the logs are written to. #name: functionbeat - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. #rotateeverybytes: 10485760 # = 10MB - # Number of rotated log files to keep. Oldest files will be deleted first. + # Number of rotated log files to keep. The oldest files will be deleted first. #keepfiles: 7 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. #permissions: 0600 - # Enable log file rotation on time intervals in addition to size-based rotation. + # Enable log file rotation on time intervals in addition to the size-based rotation. # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h # are boundary-aligned with minutes, hours, days, weeks, months, and years as # reported by the local system clock. All other intervals are calculated from the # Unix epoch. Defaults to disabled. #interval: 0 - # Rotate existing logs on startup rather than appending to the existing + # Rotate existing logs on startup rather than appending them to the existing # file. Defaults to true. # rotateonstartup: true @@ -1239,7 +1239,7 @@ logging.files: # Array of hosts to connect to. # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # In case you specify an additional path, the scheme is required: http://localhost:9200/path # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 #hosts: ["localhost:9200"] @@ -1286,7 +1286,7 @@ logging.files: # Elasticsearch after a network error. The default is 60s. #backoff.max: 60s - # Configure HTTP request timeout before failing an request to Elasticsearch. + # Configure HTTP request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -1383,15 +1383,15 @@ logging.files: # =============================== HTTP Endpoint ================================ -# Each beat can expose internal metrics through a HTTP endpoint. For security +# Each beat can expose internal metrics through an HTTP endpoint. 
For security # reasons the endpoint is disabled by default. This feature is currently experimental. -# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output # append ?pretty to the URL. # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe. # When using IP addresses, it is recommended to only use localhost. #http.host: localhost @@ -1401,7 +1401,7 @@ logging.files: # Define which user should be owning the named pipe. #http.named_pipe.user: -# Define which the permissions that should be applied to the named pipe, use the Security +# Define which permissions should be applied to the named pipe, use the Security # Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with # `http.user`. #http.named_pipe.security_descriptor: diff --git a/x-pack/functionbeat/functionbeat.yml b/x-pack/functionbeat/functionbeat.yml index 63b2b56bdcb..51c9fa706b8 100644 --- a/x-pack/functionbeat/functionbeat.yml +++ b/x-pack/functionbeat/functionbeat.yml @@ -9,7 +9,7 @@ # # ================================== Provider ================================== -# Configure functions to run on AWS Lambda, currently we assume that the credentials +# Configure functions to run on AWS Lambda, currently, we assume that the credentials # are present in the environment to correctly create the function when using the CLI. # # Configure which S3 endpoint should we use. @@ -18,13 +18,13 @@ functionbeat.provider.aws.endpoint: "s3.amazonaws.com" functionbeat.provider.aws.deploy_bucket: "functionbeat-deploy" functionbeat.provider.aws.functions: - # Define the list of function availables, each function required to have a unique name. + # Define the list of functions available, each function is required to have a unique name. # Create a function that accepts events coming from cloudwatchlogs. - name: cloudwatch enabled: false type: cloudwatch_logs - # Description of the method to help identify them when you run multiples functions. + # Description of the method to help identify them when you run multiple functions. description: "lambda function for cloudwatch logs" # Concurrency, is the reserved number of instances for that function. @@ -37,7 +37,7 @@ functionbeat.provider.aws.functions: # There is a hard limit of 3008MiB for each function. Default is 128MiB. #memory_size: 128MiB - # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue. + # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue. #dead_letter_config.target_arn: # Execution role of the function. @@ -54,7 +54,7 @@ functionbeat.provider.aws.functions: #fields: # env: staging - # List of cloudwatch log group registered to that function. + # List of cloudwatch log groups registered to that function. triggers: - log_group_name: /aws/lambda/functionbeat-cloudwatch_logs filter_pattern: mylog_ @@ -69,7 +69,7 @@ functionbeat.provider.aws.functions: enabled: false type: sqs - # Description of the method to help identify them when you run multiples functions. + # Description of the method to help identify them when you run multiple functions. description: "lambda function for SQS events" # Concurrency, is the reserved number of instances for that function. 
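As the HTTP Endpoint section of the reference config above notes, a Beat with `http.enabled: true` serves internal stats at http://localhost:5066/stats, with `?pretty` for readable JSON. A minimal client sketch, assuming the endpoint is enabled on the default host and port:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:5066/stats?pretty")
	if err != nil {
		log.Fatalf("is http.enabled set to true? %v", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```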
@@ -82,7 +82,7 @@ functionbeat.provider.aws.functions:
     # There is a hard limit of 3008MiB for each function. Default is 128MiB.
     #memory_size: 128MiB

-    # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue.
+    # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue.
     #dead_letter_config.target_arn:

     # Execution role of the function.
@@ -119,7 +119,7 @@ functionbeat.provider.aws.functions:
     enabled: false
     type: kinesis

-    # Description of the method to help identify them when you run multiples functions.
+    # Description of the method to help identify it when you run multiple functions.
     description: "lambda function for Kinesis events"

     # Concurrency, is the reserved number of instances for that function.
@@ -132,7 +132,7 @@ functionbeat.provider.aws.functions:
     # There is a hard limit of 3008MiB for each function. Default is 128MiB.
     #memory_size: 128MiB

-    # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue.
+    # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue.
     #dead_letter_config.target_arn:

     # Execution role of the function.
@@ -189,7 +189,7 @@ functionbeat.provider.aws.functions:
     enabled: false
     type: cloudwatch_logs_kinesis

-    # Description of the method to help identify them when you run multiples functions.
+    # Description of the method to help identify it when you run multiple functions.
     description: "lambda function for Cloudwatch logs in Kinesis events"

     # Set base64_encoded if your data is base64 encoded.
@@ -208,7 +208,7 @@ functionbeat.provider.aws.functions:
     # There is a hard limit of 3008MiB for each function. Default is 128MiB.
     #memory_size: 128MiB

-    # Dead letter queue configuration, this must be set to an ARN pointing to a SQS queue.
+    # Dead letter queue configuration, this must be set to an ARN pointing to an SQS queue.
     #dead_letter_config.target_arn:

     # Execution role of the function.
@@ -258,7 +258,7 @@ functionbeat.provider.aws.functions:
 # all the transactions sent by a single shipper in the web interface.
 #name:

-# The tags of the shipper are included in their own field with each
+# The tags of the shipper are included in the `tags` field with each
 # transaction published.
 #tags: ["service-X", "web-tier"]

@@ -273,8 +273,8 @@ functionbeat.provider.aws.functions:
 # options here or by using the `setup` command.
 #setup.dashboards.enabled: false

-# The URL from where to download the dashboards archive. By default this URL
-# has a value which is computed based on the Beat name and version. For released
+# The URL from where to download the dashboard archive. By default, this URL
+# has a value that is computed based on the Beat name and version. For released
 # versions, this URL points to the dashboard archive on the artifacts.elastic.co
 # website.
 #setup.dashboards.url:
@@ -357,7 +357,7 @@ processors:
 #logging.level: debug

 # At debug level, you can selectively enable logging only for some components.
-# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# To enable all selectors, use ["*"]. Examples of other selectors are "beat",
 # "publisher", "service".
 #logging.selectors: ["*"]

@@ -375,7 +375,7 @@ processors:
 #monitoring.cluster_uuid:

 # Uncomment to send the metrics to Elasticsearch. Most settings from the
-# Elasticsearch output are accepted here as well.
+# Elasticsearch output are also accepted here.
 # Note that the settings should point to your Elasticsearch *monitoring* cluster.
 # Any setting that is not set is automatically inherited from the Elasticsearch
 # output configuration, so if you have the Elasticsearch output configured such
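The makezip.go change below, like the cloudfoundry, hub, and EC2 watcher changes further down in this patch, replaces github.com/pkg/errors with stdlib error wrapping. A minimal sketch of why the two forms are equivalent for callers; the sentinel and function names here are illustrative, not taken from this patch:

package main

import (
	"errors"
	"fmt"
)

// errKeystore is an illustrative sentinel, not a name from this patch.
var errKeystore = errors.New("keystore unavailable")

func load() error {
	// Before: errors.Wrapf(err, "cannot load the keystore for packaging")
	// After: fmt.Errorf with the %w verb, which keeps the cause inspectable.
	return fmt.Errorf("cannot load the keystore for packaging: %w", errKeystore)
}

func main() {
	err := load()
	fmt.Println(errors.Is(err, errKeystore)) // true: %w preserves the chain
}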
diff --git a/x-pack/functionbeat/manager/core/makezip.go b/x-pack/functionbeat/manager/core/makezip.go
index aa311db9c34..6f5c2c2d530 100644
--- a/x-pack/functionbeat/manager/core/makezip.go
+++ b/x-pack/functionbeat/manager/core/makezip.go
@@ -9,8 +9,6 @@ import (
 	yaml "gopkg.in/yaml.v2"

-	"github.com/pkg/errors"
-
 	"github.com/elastic/beats/v7/libbeat/cfgfile"
 	"github.com/elastic/beats/v7/libbeat/cmd/instance"
 	"github.com/elastic/beats/v7/x-pack/functionbeat/config"
@@ -105,7 +103,7 @@ func keystorePackager() (keystore.Packager, error) {

 	store, err := instance.LoadKeystore(cfg, "functionbeat")
 	if err != nil {
-		return nil, errors.Wrapf(err, "cannot load the keystore for packaging")
+		return nil, fmt.Errorf("cannot load the keystore for packaging: %w", err)
 	}

 	packager, ok := store.(keystore.Packager)
diff --git a/x-pack/heartbeat/heartbeat.reference.yml b/x-pack/heartbeat/heartbeat.reference.yml
index 1ab668e2bd4..b8281727026 100644
--- a/x-pack/heartbeat/heartbeat.reference.yml
+++ b/x-pack/heartbeat/heartbeat.reference.yml
@@ -10,7 +10,7 @@

 ############################# Heartbeat ######################################

-# Define a directory to load monitor definitions from. Definitions take the form
+# Define a directory from which to load monitor definitions. Definitions take the form
 # of individual yaml files.
 heartbeat.config.monitors:
   # Directory + glob pattern to search for configuration files
@@ -25,7 +25,7 @@ heartbeat.monitors:
 - type: icmp # monitor type `icmp` (requires root) uses ICMP Echo Request to ping
              # configured hosts
-  # ID used to uniquely identify this monitor in elasticsearch even if the config changes
+  # ID used to uniquely identify this monitor in Elasticsearch even if the config changes
   id: my-monitor

   # Human readable display name for this service in Uptime UI and elsewhere
@@ -43,7 +43,7 @@
   # List of hosts to ping
   hosts: ["localhost"]
-  # Configure IP protocol types to ping on if hostnames are configured.
+  # Configure IP protocol types to ping if hostnames are configured.
   # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`.
   ipv4: true
   ipv6: true
@@ -55,7 +55,7 @@
   # Waiting duration until another ICMP Echo Request is emitted.
   wait: 1s

-  # The tags of the monitors are included in their own field with each
+  # The tags of the monitors are included in the `tags` field with each
   # transaction published. Tags make it easy to group servers by different
   # logical properties.
   #tags: ["service-X", "web-tier"]
@@ -81,9 +81,9 @@
   # How often to check for changes
   #reload.period: 1s

-- type: tcp # monitor type `tcp`. Connect via TCP and optionally verify endpoint
+- type: tcp # monitor type `tcp`. Connect via TCP and optionally verify the endpoint
   # by sending/receiving a custom payload
-  # ID used to uniquely identify this monitor in elasticsearch even if the config changes
+  # ID used to uniquely identify this monitor in Elasticsearch even if the config changes
   id: my-monitor

   # Human readable display name for this service in Uptime UI and elsewhere
@@ -97,23 +97,23 @@
   # configure hosts to ping.
   # Entries can be:
-  # - plain host name or IP like `localhost`:
+  # - plain hostname or IP like `localhost`:
   #     Requires ports configs to be checked. If ssl is configured,
-  #     a SSL/TLS based connection will be established. Otherwise plain tcp connection
+  #     an SSL/TLS based connection will be established. Otherwise a plain tcp connection
   #     will be established
   # - hostname + port like `localhost:12345`:
-  #     Connect to port on given host. If ssl is configured,
-  #     a SSL/TLS based connection will be established. Otherwise plain tcp connection
+  #     Connect to port on a given host. If ssl is configured,
+  #     an SSL/TLS based connection will be established. Otherwise a plain tcp connection
   #     will be established
   # - full url syntax. `scheme://<host>:[port]`. The `<scheme>` can be one of
-  #     `tcp`, `plain`, `ssl` and `tls`. If `tcp`, `plain` is configured, a plain
+  #     `tcp`, `plain`, `ssl`, and `tls`. If `tcp`, `plain` is configured, a plain
   #     tcp connection will be established, even if ssl is configured.
   #     Using `tls`/`ssl`, an SSL connection is established. If no ssl is configured,
   #     system defaults will be used (not supported on windows).
-  #     If `port` is missing in url, the ports setting is required.
+  #     If `port` is missing in url, the `ports` setting is required.
   hosts: ["localhost:9200"]

-  # Configure IP protocol types to ping on if hostnames are configured.
+  # Configure IP protocol types to ping if hostnames are configured.
   # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`.
   ipv4: true
   ipv6: true
@@ -126,10 +126,10 @@ heartbeat.monitors:
   #timeout: 16s

   # Optional payload string to send to remote and expected answer. If none is
-  # configured, the endpoint is expected to be up if connection attempt was
+  # configured, the endpoint is expected to be up if a connection attempt was
   # successful. If only `send_string` is configured, any response will be
   # accepted as ok. If only `receive_string` is configured, no payload will be
-  # send, but client expects to receive expected payload on connect.
+  # sent, but the client expects to receive the expected payload on connect.
   #check:
     #send: ''
     #receive: ''
@@ -159,8 +159,8 @@ heartbeat.monitors:
   # Set to true to publish fields with null values in events.
   #keep_null: false

-- type: http # monitor type `http`. Connect via HTTP an optionally verify response
-  # ID used to uniquely identify this monitor in elasticsearch even if the config changes
+- type: http # monitor type `http`. Connect via HTTP and optionally verify the response
+  # ID used to uniquely identify this monitor in Elasticsearch even if the config changes.
   id: my-http-monitor

   # Human readable display name for this service in Uptime UI and elsewhere
@@ -170,12 +170,12 @@ heartbeat.monitors:
   #enabled: true

   # Configure task schedule
-  schedule: '@every 5s' # every 5 seconds from start of beat
+  schedule: '@every 5s' # every 5 seconds from the start of beat

   # Configure URLs to ping
   urls: ["http://localhost:9200"]

-  # Configure IP protocol types to ping on if hostnames are configured.
+  # Configure IP protocol types to ping if hostnames are configured.
   # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`.
   ipv4: true
   ipv6: true
@@ -191,7 +191,7 @@ heartbeat.monitors:
   #username: ''
   #password: ''

-  # TLS/SSL connection settings for use with HTTPS endpoint. If not configured
+  # TLS/SSL connection settings for use with HTTPS endpoint. If not configured,
   # system defaults will be used.
   #ssl:
     # Certificate Authorities
@@ -202,7 +202,7 @@ heartbeat.monitors:
   # Request settings:
   #check.request:
-    # Configure HTTP method to use. Only 'HEAD', 'GET' and 'POST' methods are allowed.
+    # Configure HTTP method to use. Only 'HEAD', 'GET', and 'POST' methods are allowed.
#method: "GET" # Dictionary of additional HTTP headers to send: @@ -247,17 +247,17 @@ heartbeat.monitors: #keep_null: false heartbeat.scheduler: - # Limit number of concurrent tasks executed by heartbeat. The task limit if + # Limit the number of concurrent tasks executed by heartbeat. The task limit if # disabled if set to 0. The default is 0. #limit: 0 - # Set the scheduler it's time zone + # Set the scheduler to its time zone #location: '' heartbeat.jobs: # Limit the number of concurrent monitors executed by heartbeat. This differs from # heartbeat.scheduler.limit in that it maps to individual monitors rather than the - # subtasks of monitors. For non-browser monitors a subtask usually corresponds to a + # subtasks of monitors. For non-browser monitors, a subtask usually corresponds to a # single file descriptor. # This feature is most useful for the browser type #browser.limit: 1 @@ -268,10 +268,10 @@ heartbeat.jobs: # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. +# If this option is not defined, the hostname is used. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] @@ -283,7 +283,7 @@ heartbeat.jobs: # env: staging # If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields +# fields in the output document instead of being grouped under a field # sub-dictionary. Default is false. #fields_under_root: false @@ -295,7 +295,7 @@ heartbeat.jobs: #queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs - # bulk_max_size) to the output, the moment the output is ready to server + # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events. #mem: # Max number of events the queue can buffer. @@ -347,7 +347,7 @@ heartbeat.jobs: # length of its retry interval each time, up to this maximum. #max_retry_interval: 30s -# Sets the maximum number of CPUs that can be executing simultaneously. The +# Sets the maximum number of CPUs that can be executed simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: @@ -468,7 +468,7 @@ heartbeat.jobs: # ignore_missing: false # fail_on_error: true # -# The following example copies the value of message to message_copied +# The following example copies the value of the message to message_copied # #processors: # - copy_fields: @@ -478,7 +478,7 @@ heartbeat.jobs: # fail_on_error: true # ignore_missing: false # -# The following example truncates the value of message to 1024 bytes +# The following example truncates the value of the message to 1024 bytes # #processors: # - truncate_fields: @@ -575,7 +575,7 @@ output.elasticsearch: # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. #index: "heartbeat-%{[agent.version]}" - # Optional ingest pipeline. By default no pipeline will be used. + # Optional ingest pipeline. By default, no pipeline will be used. #pipeline: "" # Optional HTTP path @@ -1306,14 +1306,14 @@ output.elasticsearch: # These settings control loading the sample dashboards to the Kibana index. 
 # the dashboards are disabled by default and can be enabled either by setting the
-# options here, or by using the `-setup` CLI flag or the `setup` command.
+# options here or by using the `-setup` CLI flag or the `setup` command.
 #setup.dashboards.enabled: false

 # The directory from where to read the dashboards. The default is the `kibana`
 # folder in the home path.
 #setup.dashboards.directory: ${path.home}/kibana

-# The URL from where to download the dashboards archive. It is used instead of
+# The URL from where to download the dashboard archive. It is used instead of
 # the directory if it has a value.
 #setup.dashboards.url:

@@ -1410,7 +1410,7 @@ setup.template.settings:
 # Configure index lifecycle management (ILM) to manage the backing indices
 # of your data streams.

-# Enable ILM support. Valid values are true, false.
+# Enable ILM support. Valid values are true or false.
 #setup.ilm.enabled: true

 # Set the lifecycle policy name. The default policy name is
@@ -1565,25 +1565,25 @@ logging.files:
   # The name of the files where the logs are written to.
   #name: heartbeat

-  # Configure log file size limit. If limit is reached, log file will be
-  # automatically rotated
+  # Configure log file size limit. If the limit is reached, the log file will be
+  # automatically rotated.
   #rotateeverybytes: 10485760 # = 10MB

-  # Number of rotated log files to keep. Oldest files will be deleted first.
+  # Number of rotated log files to keep. The oldest files will be deleted first.
   #keepfiles: 7

   # The permissions mask to apply when rotating log files. The default value is 0600.
   # Must be a valid Unix-style file permissions mask expressed in octal notation.
   #permissions: 0600

-  # Enable log file rotation on time intervals in addition to size-based rotation.
+  # Enable log file rotation on time intervals in addition to the size-based rotation.
   # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h
   # are boundary-aligned with minutes, hours, days, weeks, months, and years as
   # reported by the local system clock. All other intervals are calculated from the
   # Unix epoch. Defaults to disabled.
   #interval: 0

-  # Rotate existing logs on startup rather than appending to the existing
+  # Rotate existing logs on startup rather than appending them to the existing
   # file. Defaults to true.
   # rotateonstartup: true
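The `interval` semantics documented above (boundaries computed from the Unix epoch) can be pictured with a short sketch. This is only an illustration of the documented behavior, not libbeat's actual rotation code:

package main

import (
	"fmt"
	"time"
)

// nextRotation returns the next interval boundary measured from the Unix
// epoch; intervals like 1m or 1h therefore land on whole minutes and hours.
func nextRotation(now time.Time, interval time.Duration) time.Time {
	epoch := time.Unix(0, 0).UTC()
	elapsed := now.Sub(epoch)
	return epoch.Add(elapsed.Truncate(interval) + interval)
}

func main() {
	now := time.Date(2023, 6, 1, 10, 17, 42, 0, time.UTC)
	fmt.Println(nextRotation(now, time.Hour)) // 2023-06-01 11:00:00 +0000 UTC
}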
@@ -1611,7 +1611,7 @@ logging.files:
   # Array of hosts to connect to.
   # Scheme and port can be left out and will be set to the default (http and 9200)
-  # In case you specify and additional path, the scheme is required: http://localhost:9200/path
+  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
   # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
   #hosts: ["localhost:9200"]

@@ -1658,7 +1658,7 @@ logging.files:
   # Elasticsearch after a network error. The default is 60s.
   #backoff.max: 60s

-  # Configure HTTP request timeout before failing an request to Elasticsearch.
+  # Configure HTTP request timeout before failing a request to Elasticsearch.
   #timeout: 90

   # Use SSL settings for HTTPS.
@@ -1755,15 +1755,15 @@ logging.files:

 # =============================== HTTP Endpoint ================================

-# Each beat can expose internal metrics through a HTTP endpoint. For security
+# Each beat can expose internal metrics through an HTTP endpoint. For security
 # reasons the endpoint is disabled by default. This feature is currently experimental.
-# Stats can be access through http://localhost:5066/stats . For pretty JSON output
+# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output
 # append ?pretty to the URL.

 # Defines if the HTTP endpoint is enabled.
 #http.enabled: false

-# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
+# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe.
 # When using IP addresses, it is recommended to only use localhost.
 #http.host: localhost

@@ -1773,7 +1773,7 @@ logging.files:
 # Define which user should be owning the named pipe.
 #http.named_pipe.user:

-# Define which the permissions that should be applied to the named pipe, use the Security
+# Define which permissions should be applied to the named pipe; use the Security
 # Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with
 # `http.user`.
 #http.named_pipe.security_descriptor:
diff --git a/x-pack/heartbeat/heartbeat.yml b/x-pack/heartbeat/heartbeat.yml
index e76be2b3c8a..76876b9c7ff 100644
--- a/x-pack/heartbeat/heartbeat.yml
+++ b/x-pack/heartbeat/heartbeat.yml
@@ -9,7 +9,7 @@

 ############################# Heartbeat ######################################

-# Define a directory to load monitor definitions from. Definitions take the form
+# Define a directory from which to load monitor definitions. Definitions take the form
 # of individual yaml files.
 heartbeat.config.monitors:
   # Directory + glob pattern to search for configuration files
@@ -22,13 +22,13 @@ heartbeat.config.monitors:
 # Configure monitors inline
 heartbeat.monitors:
 - type: http
-  # Set enabled to true (or delete the following line) to enable this example monitor
+  # Set enabled to true (or delete the following line) to enable this monitor
   enabled: false
-  # ID used to uniquely identify this monitor in elasticsearch even if the config changes
+  # ID used to uniquely identify this monitor in Elasticsearch even if the config changes
   id: my-monitor
   # Human readable display name for this service in Uptime UI and elsewhere
   name: My Monitor
-  # List or urls to query
+  # List of URLs to query
   urls: ["http://localhost:9200"]
   # Configure task schedule
   schedule: '@every 10s'
@@ -53,7 +53,7 @@ setup.template.settings:
 # all the transactions sent by a single shipper in the web interface.
 #name:

-# The tags of the shipper are included in their own field with each
+# The tags of the shipper are included in the `tags` field with each
 # transaction published.
 #tags: ["service-X", "web-tier"]

@@ -144,7 +144,7 @@ processors:
 #logging.level: debug

 # At debug level, you can selectively enable logging only for some components.
-# To enable all selectors use ["*"]. Examples of other selectors are "beat",
+# To enable all selectors, use ["*"]. Examples of other selectors are "beat",
 # "publisher", "service".
 #logging.selectors: ["*"]

@@ -162,7 +162,7 @@ processors:
 #monitoring.cluster_uuid:

 # Uncomment to send the metrics to Elasticsearch. Most settings from the
-# Elasticsearch output are accepted here as well.
+# Elasticsearch output are also accepted here.
 # Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch # output configuration, so if you have the Elasticsearch output configured such diff --git a/x-pack/heartbeat/monitors/browser/browser.go b/x-pack/heartbeat/monitors/browser/browser.go index b09d03eea8a..bd269573643 100644 --- a/x-pack/heartbeat/monitors/browser/browser.go +++ b/x-pack/heartbeat/monitors/browser/browser.go @@ -34,7 +34,7 @@ func create(name string, cfg *config.C) (p plugin.Plugin, err error) { return plugin.Plugin{}, fmt.Errorf("script monitors cannot be run as root") } - s, err := NewProject(cfg) + s, err := NewSourceJob(cfg) if err != nil { return plugin.Plugin{}, err } diff --git a/x-pack/heartbeat/monitors/browser/project.go b/x-pack/heartbeat/monitors/browser/project.go deleted file mode 100644 index 853b0754bc0..00000000000 --- a/x-pack/heartbeat/monitors/browser/project.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. -//go:build linux || darwin - -package browser - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/elastic/beats/v7/heartbeat/monitors/jobs" - "github.com/elastic/beats/v7/heartbeat/monitors/plugin" - "github.com/elastic/beats/v7/heartbeat/monitors/stdfields" - "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/x-pack/heartbeat/monitors/browser/synthexec" - "github.com/elastic/elastic-agent-libs/config" - "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent-libs/mapstr" -) - -type JourneyLister func(ctx context.Context, projectPath string, params mapstr.M) (journeyNames []string, err error) - -type Project struct { - rawCfg *config.C - projectCfg *Config - ctx context.Context - cancel context.CancelFunc -} - -func NewProject(rawCfg *config.C) (*Project, error) { - // Global project context to cancel all jobs - // on close - ctx, cancel := context.WithCancel(context.Background()) - - s := &Project{ - rawCfg: rawCfg, - projectCfg: DefaultConfig(), - ctx: ctx, - cancel: cancel, - } - err := rawCfg.Unpack(s.projectCfg) - if err != nil { - return nil, ErrBadConfig(err) - } - - return s, nil -} - -func ErrBadConfig(err error) error { - return fmt.Errorf("could not parse project config: %w", err) -} - -func (p *Project) String() string { - panic("implement me") -} - -func (p *Project) Fetch() error { - return p.projectCfg.Source.Active().Fetch() -} - -func (p *Project) Workdir() string { - return p.projectCfg.Source.Active().Workdir() -} - -func (p *Project) Params() map[string]interface{} { - return p.projectCfg.Params -} - -func (p *Project) FilterJourneys() synthexec.FilterJourneyConfig { - return p.projectCfg.FilterJourneys -} - -func (p *Project) StdFields() stdfields.StdMonitorFields { - sFields, err := stdfields.ConfigToStdMonitorFields(p.rawCfg) - // Should be impossible since outer monitor.go should run this same code elsewhere - // TODO: Just pass stdfields in to remove second deserialize - if err != nil { - logp.L().Warnf("Could not deserialize monitor fields for browser, this should never happen: %s", err) - } - return sFields -} - -func (p *Project) Close() error { - if p.projectCfg.Source.ActiveMemo != nil { - p.projectCfg.Source.ActiveMemo.Close() - } - - // Cancel running jobs ctxs - p.cancel() - - return nil -} - -func (p *Project) extraArgs() []string { - extraArgs := 
p.projectCfg.SyntheticsArgs - if len(p.projectCfg.PlaywrightOpts) > 0 { - s, err := json.Marshal(p.projectCfg.PlaywrightOpts) - if err != nil { - // This should never happen, if it was parsed as a config it should be serializable - logp.L().Warn("could not serialize playwright options '%v': %w", p.projectCfg.PlaywrightOpts, err) - } else { - extraArgs = append(extraArgs, "--playwright-options", string(s)) - } - } - if p.projectCfg.IgnoreHTTPSErrors { - extraArgs = append(extraArgs, "--ignore-https-errors") - } - if p.projectCfg.Sandbox { - extraArgs = append(extraArgs, "--sandbox") - } - if p.projectCfg.Screenshots != "" { - extraArgs = append(extraArgs, "--screenshots", p.projectCfg.Screenshots) - } - if p.projectCfg.Throttling != nil { - switch t := p.projectCfg.Throttling.(type) { - case bool: - if !t { - extraArgs = append(extraArgs, "--no-throttling") - } - case string: - extraArgs = append(extraArgs, "--throttling", fmt.Sprintf("%v", p.projectCfg.Throttling)) - case map[string]interface{}: - j, err := json.Marshal(t) - if err != nil { - logp.L().Warnf("could not serialize throttling config to JSON: %s", err) - } else { - extraArgs = append(extraArgs, "--throttling", string(j)) - } - } - } - - return extraArgs -} - -func (p *Project) jobs() []jobs.Job { - var j jobs.Job - - isScript := p.projectCfg.Source.Inline != nil - ctx := context.WithValue(p.ctx, synthexec.SynthexecTimeout, p.projectCfg.Timeout+30*time.Second) - - if isScript { - src := p.projectCfg.Source.Inline.Script - j = synthexec.InlineJourneyJob(ctx, src, p.Params(), p.StdFields(), p.extraArgs()...) - } else { - j = func(event *beat.Event) ([]jobs.Job, error) { - err := p.Fetch() - if err != nil { - return nil, fmt.Errorf("could not fetch for project job: %w", err) - } - sj, err := synthexec.ProjectJob(ctx, p.Workdir(), p.Params(), p.FilterJourneys(), p.StdFields(), p.extraArgs()...) 
- if err != nil { - return nil, err - } - return sj(event) - } - } - return []jobs.Job{j} -} - -func (p *Project) plugin() plugin.Plugin { - return plugin.Plugin{ - Jobs: p.jobs(), - DoClose: p.Close, - Endpoints: 1, - } -} diff --git a/x-pack/heartbeat/monitors/browser/source/project.go b/x-pack/heartbeat/monitors/browser/source/project.go index f97e69d6709..7caf3edcc2e 100644 --- a/x-pack/heartbeat/monitors/browser/source/project.go +++ b/x-pack/heartbeat/monitors/browser/source/project.go @@ -17,6 +17,7 @@ import ( "path/filepath" "regexp" "strings" + "sync" "syscall" "github.com/elastic/elastic-agent-libs/logp" @@ -26,6 +27,8 @@ import ( type ProjectSource struct { Content string `config:"content" json:"content"` TargetDirectory string + fetched bool + mtx sync.Mutex } var ErrNoContent = fmt.Errorf("no 'content' value specified for project monitor source") @@ -39,6 +42,14 @@ func (p *ProjectSource) Validate() error { } func (p *ProjectSource) Fetch() error { + // We only need to unzip the source exactly once + p.mtx.Lock() + defer p.mtx.Unlock() + if p.fetched { + logp.L().Debugf("browser project: re-use already unpacked source: %s", p.Workdir()) + return nil + } + decodedBytes, err := base64.StdEncoding.DecodeString(p.Content) if err != nil { return err @@ -60,6 +71,9 @@ func (p *ProjectSource) Fetch() error { if err != nil { return fmt.Errorf("could not make temp dir for unzipping project source: %w", err) } + + logp.L().Debugf("browser project: unpack source: %s", p.Workdir()) + err = os.Chmod(p.TargetDirectory, defaultMod) if err != nil { return fmt.Errorf("failed assigning default mode %s to temp dir: %w", defaultMod, err) @@ -81,6 +95,8 @@ func (p *ProjectSource) Fetch() error { } } + // We've succeeded, mark the fetch as a success + p.fetched = true return nil } @@ -142,6 +158,8 @@ func (p *ProjectSource) Workdir() string { } func (p *ProjectSource) Close() error { + logp.L().Debugf("browser project: close project source: %s", p.Workdir()) + if p.TargetDirectory != "" { return os.RemoveAll(p.TargetDirectory) } diff --git a/x-pack/heartbeat/monitors/browser/source/project_test.go b/x-pack/heartbeat/monitors/browser/source/project_test.go index f30012670b9..09dda4b5146 100644 --- a/x-pack/heartbeat/monitors/browser/source/project_test.go +++ b/x-pack/heartbeat/monitors/browser/source/project_test.go @@ -50,11 +50,29 @@ func TestProjectSource(t *testing.T) { return } require.NoError(t, err) + fetchAndValidate(t, psrc) }) } } +func TestFetchCaching(t *testing.T) { + cfg := mapstr.M{ + "content": 
"UEsDBBQACAAIAJ27qVQAAAAAAAAAAAAAAAAiAAAAZXhhbXBsZXMvdG9kb3MvYWR2YW5jZWQuam91cm5leS50c5VRPW/CMBDd+RWnLA0Sigt0KqJqpbZTN+iEGKzkIC6JbfkuiBTx3+uEEAGlgi7Rnf38viIESCLkR/FJ6Eis1VIjpanATBKrWFCpOUU/kcCNzG2GJNgkhoRM1lLHmERfpnAay4ipo3JrHMMWmjPYwcKZHILn33zBqIV3ADIjkxdrJ4y251eZJFNJq3b1Hh1XJx+KeKK+8XATpxiv3o07RidI7Ex5OOocTEQixcz6mF66MRgGXkmxMhqkTiA2VcJ6NQsgpZcZAnueoAfhFqxcYs9/ncwJdl0YP9XeY6OJgb3qFDcMYwhejb5jsAUDyYxBaSi9HmCJlfZJ2vCYNCpc1h2d5m8AB/r99cU+GmS/hpwXc4nmrKh/K917yK57VqZe1lU6zM26WvIiY2WbHunWIiusb3IWVBP0/bP9NGinYTC/qcqWLloY9ybjNAy5VbzYdP1sdz3+8FqJleqsP7/ONPjjp++TPgS3eaks/wBQSwcIVYIEHGwBAADRAwAAUEsDBBQACAAIAJ27qVQAAAAAAAAAAAAAAAAZAAAAZXhhbXBsZXMvdG9kb3MvaGVscGVycy50c5VUTYvbMBC9768YRGAVyKb0uktCu9CeektvpRCtM4nFKpKQxt2kwf+9I9lJ5cRb6MWW5+u9eTOW3nsXCE4QCf0M8OCxImhhG9wexCc0KpKuPsSjpRr5FMXTXeVsJDBObT57v+I8WID0aoczaIKZwmIJpzvIFaUwqrFVDcp7MQPFdSqQlxAA9aY0QUqe7xw5mQo8saflZ3uGUpvNdxVfh1DEliHWmuOyGSan9GrXY4hdSW19Q1yswJ9Ika1zi28P5DZOZCZnjp2Pjh5lhr71+YAxSvHFEgZx20UqGVdoWGAXGFo0Zp5sD0YnOXX+uMi71TY3nTh2PYy0HZCaYMsm0umrC2cYuWYpStwWlksgPNBC9CKJ9UDqGDFQAv7GrFb6N/aqD0hEtl9pX9VYvQLViroR5KZqFXmlVEXmyDNJWS0wkT1aiqPD6fZPynIsEznoYDqdG7Q7qqcs2DPKzOVG7EyHhSj25n0Zyw62PJvcwH2vzz1PN3czSrifwHlaZfUbThuMFNzxPyj1GVeE/rHWRr2guaz1e6wu0foSmhPTL3DwiuqFshVDu/D4aPSPjz/FIK1n9dwQOfu3gk7pL9k4jK+M5lk0LBRy9CB7nn2yD+cStfuFQQ5+riK9kJQ3JV9cbCmuh1n6HF3h5LleimS7GkoynWVL5+KWS6h/AFBLBwgvDHpj+wEAAC8FAABQSwECLQMUAAgACACdu6lUVYIEHGwBAADRAwAAIgAAAAAAAAAAACAApIEAAAAAZXhhbXBsZXMvdG9kb3MvYWR2YW5jZWQuam91cm5leS50c1BLAQItAxQACAAIAJ27qVQvDHpj+wEAAC8FAAAZAAAAAAAAAAAAIACkgbwBAABleGFtcGxlcy90b2Rvcy9oZWxwZXJzLnRzUEsFBgAAAAACAAIAlwAAAP4DAAAAAA==", + } + psrc, err := dummyPSource(cfg) + require.NoError(t, err) + defer psrc.Close() + + err = psrc.Fetch() + require.NoError(t, err) + wdir := psrc.Workdir() + err = psrc.Fetch() + require.NoError(t, err) + wdirNext := psrc.Workdir() + require.Equal(t, wdir, wdirNext) +} + func validateFileContents(t *testing.T, dir string) { expected := []string{ "examples/todos/helpers.ts", @@ -73,6 +91,9 @@ func validateFileContents(t *testing.T, dir string) { } func fetchAndValidate(t *testing.T, psrc *ProjectSource) { + defer func() { + _ = psrc.Close() + }() err := psrc.Fetch() require.NoError(t, err) diff --git a/x-pack/heartbeat/monitors/browser/sourcejob.go b/x-pack/heartbeat/monitors/browser/sourcejob.go new file mode 100644 index 00000000000..d8ca78b23e3 --- /dev/null +++ b/x-pack/heartbeat/monitors/browser/sourcejob.go @@ -0,0 +1,256 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+//go:build linux || darwin + +package browser + +import ( + "context" + "encoding/json" + "fmt" + "math" + "strings" + "time" + + "github.com/elastic/beats/v7/heartbeat/monitors/jobs" + "github.com/elastic/beats/v7/heartbeat/monitors/plugin" + "github.com/elastic/beats/v7/heartbeat/monitors/stdfields" + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/x-pack/heartbeat/monitors/browser/synthexec" + "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" +) + +type SourceJob struct { + rawCfg *config.C + browserCfg *Config + ctx context.Context + cancel context.CancelFunc +} + +func NewSourceJob(rawCfg *config.C) (*SourceJob, error) { + // Global browser context to cancel all jobs + // on close + ctx, cancel := context.WithCancel(context.Background()) + + s := &SourceJob{ + rawCfg: rawCfg, + browserCfg: DefaultConfig(), + ctx: ctx, + cancel: cancel, + } + err := rawCfg.Unpack(s.browserCfg) + if err != nil { + return nil, ErrBadConfig(err) + } + + return s, nil +} + +func ErrBadConfig(err error) error { + return fmt.Errorf("could not parse browser config: %w", err) +} + +func (sj *SourceJob) String() string { + panic("implement me") +} + +func (sj *SourceJob) Fetch() error { + return sj.browserCfg.Source.Active().Fetch() +} + +func (sj *SourceJob) Workdir() string { + return sj.browserCfg.Source.Active().Workdir() +} + +func (sj *SourceJob) Params() map[string]interface{} { + return sj.browserCfg.Params +} + +func (sj *SourceJob) FilterJourneys() synthexec.FilterJourneyConfig { + return sj.browserCfg.FilterJourneys +} + +func (sj *SourceJob) StdFields() stdfields.StdMonitorFields { + sFields, err := stdfields.ConfigToStdMonitorFields(sj.rawCfg) + // Should be impossible since outer monitor.go should run this same code elsewhere + // TODO: Just pass stdfields in to remove second deserialize + if err != nil { + logp.L().Warnf("Could not deserialize monitor fields for browser, this should never happen: %s", err) + } + return sFields +} + +func (sj *SourceJob) Close() error { + if sj.browserCfg.Source.ActiveMemo != nil { + sj.browserCfg.Source.ActiveMemo.Close() + } + + // Cancel running jobs ctxs + sj.cancel() + + return nil +} + +// Dev flags + expected number of params, math.MaxInt32 for variadic flags +var filterMap = map[string]int{ + "--dry-run": 0, + "-h": 0, + "--help": 0, + "--inline": 1, + "--match": math.MaxInt32, + "--outfd": 1, + "--pause-on-error": 0, + "--quiet-exit-code": 0, + "-r": math.MaxInt32, + "--require": math.MaxInt32, + "--reporter": 1, + "--tags": math.MaxInt32, + "-V": 0, + "--version": 0, + "--ws-endpoint": 1, +} + +func (sj *SourceJob) extraArgs(uiOrigin bool) []string { + extraArgs := []string{} + + if uiOrigin { + extraArgs = filterDevFlags(sj.browserCfg.SyntheticsArgs, filterMap) + } else { + extraArgs = append(extraArgs, sj.browserCfg.SyntheticsArgs...) 
+	}
+
+	if len(sj.browserCfg.PlaywrightOpts) > 0 {
+		s, err := json.Marshal(sj.browserCfg.PlaywrightOpts)
+		if err != nil {
+			// This should never happen; if it was parsed as a config it should be serializable.
+			logp.L().Warnf("could not serialize playwright options '%v': %s", sj.browserCfg.PlaywrightOpts, err)
+		} else {
+			extraArgs = append(extraArgs, "--playwright-options", string(s))
+		}
+	}
+	if sj.browserCfg.IgnoreHTTPSErrors {
+		extraArgs = append(extraArgs, "--ignore-https-errors")
+	}
+	if sj.browserCfg.Sandbox {
+		extraArgs = append(extraArgs, "--sandbox")
+	}
+	if sj.browserCfg.Screenshots != "" {
+		extraArgs = append(extraArgs, "--screenshots", sj.browserCfg.Screenshots)
+	}
+	if sj.browserCfg.Throttling != nil {
+		// Throttling may be a bool (disable), a compact string like "10d/3u/20l",
+		// or a map that is forwarded to the synthetics runner as JSON.
+		switch t := sj.browserCfg.Throttling.(type) {
+		case bool:
+			if !t {
+				extraArgs = append(extraArgs, "--no-throttling")
+			}
+		case string:
+			extraArgs = append(extraArgs, "--throttling", fmt.Sprintf("%v", sj.browserCfg.Throttling))
+		case map[string]interface{}:
+			j, err := json.Marshal(t)
+			if err != nil {
+				logp.L().Warnf("could not serialize throttling config to JSON: %s", err)
+			} else {
+				extraArgs = append(extraArgs, "--throttling", string(j))
+			}
+		}
+	}
+
+	return extraArgs
+}
+
+func (sj *SourceJob) jobs() []jobs.Job {
+	var j jobs.Job
+
+	isScript := sj.browserCfg.Source.Inline != nil
+	ctx := context.WithValue(sj.ctx, synthexec.SynthexecTimeout, sj.browserCfg.Timeout+30*time.Second)
+	sFields := sj.StdFields()
+
+	if isScript {
+		src := sj.browserCfg.Source.Inline.Script
+		j = synthexec.InlineJourneyJob(ctx, src, sj.Params(), sFields, sj.extraArgs(sFields.Origin != "")...)
+	} else {
+		j = func(event *beat.Event) ([]jobs.Job, error) {
+			err := sj.Fetch()
+			if err != nil {
+				return nil, fmt.Errorf("could not fetch for browser source job: %w", err)
+			}
+
+			sj, err := synthexec.ProjectJob(ctx, sj.Workdir(), sj.Params(), sj.FilterJourneys(), sFields, sj.extraArgs(sFields.Origin != "")...)
+ if err != nil { + return nil, err + } + return sj(event) + } + } + return []jobs.Job{j} +} + +func (sj *SourceJob) plugin() plugin.Plugin { + return plugin.Plugin{ + Jobs: sj.jobs(), + DoClose: sj.Close, + Endpoints: 1, + } +} + +type argsIterator struct { + i int + args []string + val string +} + +func (a *argsIterator) Next() bool { + if a.i >= len(a.args) { + return false + } + a.val = a.args[a.i] + a.i++ + return true +} + +func (a *argsIterator) Val() string { + return a.val +} + +func (a *argsIterator) Peek() (val string, ok bool) { + if a.i >= len(a.args) { + return "", false + } + + val = a.args[a.i] + ok = true + + return val, ok +} + +// Iterate through list and filter dev flags + potential params +func filterDevFlags(args []string, filter map[string]int) []string { + result := []string{} + + iter := argsIterator{i: 0, args: args} + for { + next := iter.Next() + + if !next { + break + } + + if pCount, ok := filter[iter.Val()]; ok { + ParamsIter: + for i := 0; i < pCount; i++ { + // Found filtered flag, check if it has associated params + if param, ok := iter.Peek(); ok && !strings.HasPrefix(param, "-") { + iter.Next() + } else { + break ParamsIter + } + } + } else { + result = append(result, iter.Val()) + } + } + + return result +} diff --git a/x-pack/heartbeat/monitors/browser/project_test.go b/x-pack/heartbeat/monitors/browser/sourcejob_test.go similarity index 57% rename from x-pack/heartbeat/monitors/browser/project_test.go rename to x-pack/heartbeat/monitors/browser/sourcejob_test.go index 7c9ecec2e46..69cd4f7ffa4 100644 --- a/x-pack/heartbeat/monitors/browser/project_test.go +++ b/x-pack/heartbeat/monitors/browser/sourcejob_test.go @@ -7,6 +7,7 @@ package browser import ( "encoding/json" + "fmt" "path" "path/filepath" "reflect" @@ -44,7 +45,7 @@ func TestValidLocal(t *testing.T) { }, "timeout": timeout, }) - _, e := NewProject(cfg) + _, e := NewSourceJob(cfg) require.Error(t, e) } @@ -66,10 +67,10 @@ func TestValidInline(t *testing.T) { }, "timeout": timeout, }) - s, e := NewProject(cfg) + s, e := NewSourceJob(cfg) require.NoError(t, e) require.NotNil(t, s) - require.Equal(t, script, s.projectCfg.Source.Inline.Script) + require.Equal(t, script, s.browserCfg.Source.Inline.Script) require.Equal(t, "", s.Workdir()) require.Equal(t, testParams, s.Params()) @@ -86,7 +87,7 @@ func TestNameRequired(t *testing.T) { }, }, }) - _, e := NewProject(cfg) + _, e := NewSourceJob(cfg) require.Regexp(t, ErrNameRequired, e) } @@ -99,7 +100,7 @@ func TestIDRequired(t *testing.T) { }, }, }) - _, e := NewProject(cfg) + _, e := NewSourceJob(cfg) require.Regexp(t, ErrIdRequired, e) } @@ -107,7 +108,7 @@ func TestEmptySource(t *testing.T) { cfg := conf.MustNewConfigFrom(mapstr.M{ "source": mapstr.M{}, }) - s, e := NewProject(cfg) + s, e := NewSourceJob(cfg) require.Regexp(t, ErrBadConfig(source.ErrInvalidSource), e) require.Nil(t, s) @@ -126,36 +127,43 @@ func TestExtraArgs(t *testing.T) { name string cfg *Config want []string + ui bool }{ { "no args", &Config{}, - nil, + []string{}, + false, }, { "default", DefaultConfig(), []string{"--screenshots", "on"}, + false, }, { "sandbox", &Config{Sandbox: true}, []string{"--sandbox"}, + false, }, { "throttling truthy", &Config{Throttling: true}, - nil, + []string{}, + false, }, { "disable throttling", &Config{Throttling: false}, []string{"--no-throttling"}, + false, }, { "override throttling - text format", &Config{Throttling: "10d/3u/20l"}, []string{"--throttling", "10d/3u/20l"}, + false, }, { "override throttling - JSON format", @@ -165,21 +173,25 @@ 
func TestExtraArgs(t *testing.T) { "latency": 20, }}, []string{"--throttling", `{"download":10,"latency":20,"upload":3}`}, + false, }, { "ignore_https_errors", &Config{IgnoreHTTPSErrors: true}, []string{"--ignore-https-errors"}, + false, }, { "screenshots", &Config{Screenshots: "off"}, []string{"--screenshots", "off"}, + false, }, { "capabilities", &Config{SyntheticsArgs: []string{"--capability", "trace", "ssblocks"}}, []string{"--capability", "trace", "ssblocks"}, + false, }, { "playwright options", @@ -187,19 +199,39 @@ func TestExtraArgs(t *testing.T) { PlaywrightOpts: playWrightOpts, }, []string{"--playwright-options", string(playwrightOptsJsonBytes)}, + false, }, { "kitchen sink", &Config{SyntheticsArgs: []string{"--capability", "trace", "ssblocks"}, Sandbox: true}, []string{"--capability", "trace", "ssblocks", "--sandbox"}, + false, + }, + { + "does not filter dev flags on non-ui origin", + &Config{SyntheticsArgs: []string{"--pause-on-error", "--dry-run", "--quiet-exit-code", "--outfd", "testfd"}, Sandbox: true}, + []string{"--pause-on-error", "--dry-run", "--quiet-exit-code", "--outfd", "testfd", "--sandbox"}, + false, + }, + { + "filters dev flags on ui origin", + &Config{SyntheticsArgs: []string{"--pause-on-error", "--dry-run", "--quiet-exit-code", "--outfd", "testfd"}, Sandbox: true}, + []string{"--sandbox"}, + true, + }, + { + "filters variadic dev flags on ui origin", + &Config{SyntheticsArgs: []string{"--tags", "tag1", "tag2", "tag3", "--match", "tag4", "tag5", "--sandbox", "-r", "require1", "require2", "--require", "require3", "require4", "require5"}}, + []string{"--sandbox"}, + true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - s := &Project{ - projectCfg: tt.cfg, + s := &SourceJob{ + browserCfg: tt.cfg, } - if got := s.extraArgs(); !reflect.DeepEqual(got, tt.want) { + if got := s.extraArgs(tt.ui); !reflect.DeepEqual(got, tt.want) { t.Errorf("Project.extraArgs() = %v, want %v", got, tt.want) } }) @@ -217,9 +249,109 @@ func TestEmptyTimeout(t *testing.T) { }, }, }) - s, e := NewProject(cfg) + s, e := NewSourceJob(cfg) require.NoError(t, e) require.NotNil(t, s) - require.Equal(t, s.projectCfg.Timeout, defaults.Timeout) + require.Equal(t, s.browserCfg.Timeout, defaults.Timeout) +} + +func TestFilterDevFlags(t *testing.T) { + allFlags := []string{} + for k := range filterMap { + allFlags = append(allFlags, k) + } + + variadicGen := func(flag string, n int) []string { + params := []string{"dummy"} + params = append(params, flag) + for i := 0; i < n; i++ { + params = append(params, fmt.Sprintf("flag-%d", i)) + } + + return params + } + tests := []struct { + name string + syntheticsArgs []string + want []string + }{ + { + "no args", + nil, + []string{}, + }, + { + "no args", + []string{}, + []string{}, + }, + { + "all filtered", + allFlags, + []string{}, + }, + { + "keep unfiltered", + append([]string{"unfiltered"}, allFlags...), + []string{"unfiltered"}, + }, + { + "filter associated params", + []string{"--help", "malformed1", "--outfd", "param1", "malformed2", "--reporter", "-malformed3"}, + []string{"malformed1", "malformed2", "-malformed3"}, + }, + { + "filter variadic flags - tags - 10", + variadicGen("--tags", 10), + []string{"dummy"}, + }, + { + "filter variadic flags - tags - 50", + variadicGen("--tags", 50), + []string{"dummy"}, + }, + { + "filter variadic flags - tags - 100", + variadicGen("--tags", 100), + []string{"dummy"}, + }, + { + "filter variadic flags - require - 10", + variadicGen("--require", 10), + []string{"dummy"}, + }, + { + "filter 
variadic flags - require - 50", + variadicGen("--require", 50), + []string{"dummy"}, + }, + { + "filter variadic flags - require - 100", + variadicGen("-r", 100), + []string{"dummy"}, + }, + { + "filter variadic flags - match - 10", + variadicGen("--match", 10), + []string{"dummy"}, + }, + { + "filter variadic flags - match - 50", + variadicGen("--match", 50), + []string{"dummy"}, + }, + { + "filter variadic flags - match - 100", + variadicGen("--match", 100), + []string{"dummy"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := filterDevFlags(tt.syntheticsArgs, filterMap); !reflect.DeepEqual(got, tt.want) { + t.Errorf("syntheticsArgs = %v, want %v", got, tt.want) + } + }) + } } diff --git a/x-pack/heartbeat/scenarios/framework/framework.go b/x-pack/heartbeat/scenarios/framework/framework.go index 3bde2eed0f7..1d43f5d3e78 100644 --- a/x-pack/heartbeat/scenarios/framework/framework.go +++ b/x-pack/heartbeat/scenarios/framework/framework.go @@ -26,7 +26,6 @@ import ( "github.com/elastic/beats/v7/heartbeat/monitors" "github.com/elastic/beats/v7/heartbeat/scheduler" "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/publisher/pipeline" beatversion "github.com/elastic/beats/v7/libbeat/version" ) @@ -271,9 +270,8 @@ func setupFactoryAndSched(location *hbconfig.LocationWithID, stateLoader monitor AddTask: sched.Add, StateLoader: stateLoader, PluginsReg: plugin.GlobalPluginsReg, - PipelineClientFactory: func(pipeline beat.Pipeline) (pipeline.ISyncClient, error) { - c, _ := pipeline.Connect() - return monitors.SyncPipelineClientAdaptor{C: c}, nil + PipelineClientFactory: func(pipeline beat.Pipeline) (beat.Client, error) { + return pipeline.Connect() }, BeatRunFrom: location, }), diff --git a/x-pack/libbeat/autodiscover/providers/aws/ec2/watch.go b/x-pack/libbeat/autodiscover/providers/aws/ec2/watch.go index 967833895bd..decc8159868 100644 --- a/x-pack/libbeat/autodiscover/providers/aws/ec2/watch.go +++ b/x-pack/libbeat/autodiscover/providers/aws/ec2/watch.go @@ -6,10 +6,9 @@ package ec2 import ( "context" + "fmt" "time" - "github.com/pkg/errors" - awsauto "github.com/elastic/beats/v7/x-pack/libbeat/autodiscover/providers/aws" "github.com/elastic/elastic-agent-libs/logp" ) @@ -61,7 +60,7 @@ func (w *watcher) forever() { case <-w.ticker.C: err := w.once() if err != nil { - logp.Error(errors.Wrap(err, "error while fetching AWS EC2s")) + logp.Error(fmt.Errorf("error while fetching AWS EC2s: %w", err)) } } } diff --git a/x-pack/libbeat/autodiscover/providers/aws/elb/provider_test.go b/x-pack/libbeat/autodiscover/providers/aws/elb/provider_test.go index 9d050765781..688323e26f2 100644 --- a/x-pack/libbeat/autodiscover/providers/aws/elb/provider_test.go +++ b/x-pack/libbeat/autodiscover/providers/aws/elb/provider_test.go @@ -5,12 +5,12 @@ package elb import ( + "errors" "sync" "testing" "time" "github.com/gofrs/uuid" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/x-pack/libbeat/common/cloudfoundry/doer.go b/x-pack/libbeat/common/cloudfoundry/doer.go index 21bd7edaeb8..f1e3218b71b 100644 --- a/x-pack/libbeat/common/cloudfoundry/doer.go +++ b/x-pack/libbeat/common/cloudfoundry/doer.go @@ -12,8 +12,6 @@ import ( "strconv" "strings" - "github.com/pkg/errors" - "github.com/elastic/elastic-agent-libs/logp" ) @@ -44,7 +42,7 @@ func (d *authTokenDoer) Do(r *http.Request) (*http.Response, error) { // The reason for writing an error here is that pushing the error upstream // is 
handled by loggregate library, which is beyond our reach.
 		d.log.Errorf("error creating UAA Auth Token: %+v", err)
-		return nil, errors.Wrap(err, "error retrieving UUA token")
+		return nil, fmt.Errorf("error retrieving UAA token: %w", err)
 	}
 	r.Header.Set("Authorization", t)
 	return d.httpClient.Do(r)
diff --git a/x-pack/libbeat/common/cloudfoundry/hub.go b/x-pack/libbeat/common/cloudfoundry/hub.go
index e6ab4ce76ca..07f1000e533 100644
--- a/x-pack/libbeat/common/cloudfoundry/hub.go
+++ b/x-pack/libbeat/common/cloudfoundry/hub.go
@@ -5,11 +5,11 @@
 package cloudfoundry

 import (
+	"fmt"
 	"net/http"
 	"strings"

 	"github.com/cloudfoundry-community/go-cfclient"
-	"github.com/pkg/errors"

 	"github.com/elastic/elastic-agent-libs/logp"
 	"github.com/elastic/elastic-agent-libs/transport/httpcommon"
@@ -68,7 +68,7 @@ func (h *Hub) Client() (*cfclient.Client, error) {
 		UserAgent:  h.userAgent,
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "error creating cloudfoundry client")
+		return nil, fmt.Errorf("error creating cloudfoundry client: %w", err)
 	}
 	if h.cfg.DopplerAddress != "" {
 		cf.Endpoint.DopplerEndpoint = h.cfg.DopplerAddress
@@ -131,7 +131,7 @@ func (h *Hub) DopplerConsumerFromClient(client *cfclient.Client, callbacks Doppl
 	}
 	tlsConfig, err := tlscommon.LoadTLSConfig(h.cfg.Transport.TLS)
 	if err != nil {
-		return nil, errors.Wrap(err, "loading tls config")
+		return nil, fmt.Errorf("loading tls config: %w", err)
 	}

 	proxy := h.cfg.Transport.Proxy.ProxyFunc()
diff --git a/x-pack/libbeat/management/input_reload_test.go b/x-pack/libbeat/management/input_reload_test.go
index 972c42908f1..61ed315dc7a 100644
--- a/x-pack/libbeat/management/input_reload_test.go
+++ b/x-pack/libbeat/management/input_reload_test.go
@@ -17,6 +17,7 @@ import (
 	"github.com/elastic/beats/v7/libbeat/common"
 	"github.com/elastic/beats/v7/libbeat/common/reload"
+	"github.com/elastic/beats/v7/libbeat/tests/integration"
 	"github.com/elastic/elastic-agent-client/v7/pkg/client"
 	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
 )
@@ -51,7 +52,7 @@ func TestInputReload(t *testing.T) {
 			configIdx = currentIdx
 		}

-		srv := mockSrv([][]*proto.UnitExpected{
+		srv := integration.NewMockServer([][]*proto.UnitExpected{
 			{
 				{
 					Id: "output-unit",
diff --git a/x-pack/libbeat/management/managerV2.go b/x-pack/libbeat/management/managerV2.go
index 55a6f57fe23..105ad98bca4 100644
--- a/x-pack/libbeat/management/managerV2.go
+++ b/x-pack/libbeat/management/managerV2.go
@@ -34,6 +34,8 @@ import (
 	"github.com/elastic/beats/v7/libbeat/version"
 )

+var errStoppingOnOutputChange = errors.New("stopping Beat on output change")
+
 // diagnosticHandler is a wrapper type that's a bit of a hack, the compiler won't let us send the raw unit struct,
 // since there's a type disagreement with the `client.DiagnosticHook` argument, and due to licensing issues we can't import the agent client types into the reloader
 type diagnosticHandler struct {
@@ -437,7 +439,11 @@ func (cm *BeatV2Manager) watchErrChan(ctx context.Context) {
 		case <-ctx.Done():
 			return
 		case err := <-cm.client.Errors():
-			cm.logger.Errorf("elastic-agent-client error: %s", err)
+			// Don't print the context canceled errors that happen normally during shutdown, restart, etc.
+			if !errors.Is(err, context.Canceled) {
+				cm.logger.Errorf("elastic-agent-client error: %s", err)
+			}
+
 		}
 	}
 }
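A note on the watchErrChan hunk above: errors.Is(err, target) takes the error being inspected first and the sentinel second, and it unwraps %w chains, so wrapped cancellations are still matched. A small standalone sketch, not taken from this patch:

package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	err := fmt.Errorf("client shutting down: %w", context.Canceled)

	// Correct order: the error under inspection first, the target second.
	fmt.Println(errors.Is(err, context.Canceled)) // true

	// Reversed arguments only match when the two values are identical,
	// so wrapped cancellations would slip through and still be logged.
	fmt.Println(errors.Is(context.Canceled, err)) // false
}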
@@ -589,7 +595,15 @@ func (cm *BeatV2Manager) reload(units map[unitKey]*client.Unit) {
 	publisher.SetUnderAgentTrace(trace)

 	// reload the output configuration
-	if err := cm.reloadOutput(outputUnit); err != nil {
+	restartBeat, err := cm.reloadOutput(outputUnit)
+	// The manager has already signalled the Beat to stop;
+	// there is nothing else to do. Trying to reload inputs
+	// will only lead to invalid state updates and possible
+	// race conditions.
+	if restartBeat {
+		return
+	}
+
+	if err != nil {
 		// Output creation failed, there is no point in going any further
 		// because there is no output read the events.
 		//
@@ -650,34 +664,40 @@ func (cm *BeatV2Manager) reload(units map[unitKey]*client.Unit) {
 	}
 }

-func (cm *BeatV2Manager) reloadOutput(unit *client.Unit) error {
+// reloadOutput reloads the output; it returns a bool and an error.
+// The bool, if true, indicates that the output reload requires a restart;
+// in that case the error is always `nil`.
+//
+// In any other case, the bool is always false and the error is non-nil
+// if anything went wrong.
+func (cm *BeatV2Manager) reloadOutput(unit *client.Unit) (bool, error) {
 	// Assuming that the output reloadable isn't a list, see createBeater() in cmd/instance/beat.go
 	output := cm.registry.GetReloadableOutput()
 	if output == nil {
-		return fmt.Errorf("failed to find beat reloadable type 'output'")
+		return false, fmt.Errorf("failed to find beat reloadable type 'output'")
 	}

 	if unit == nil {
 		// output is being stopped
 		err := output.Reload(nil)
 		if err != nil {
-			return fmt.Errorf("failed to reload output: %w", err)
+			return false, fmt.Errorf("failed to reload output: %w", err)
 		}
 		cm.lastOutputCfg = nil
 		cm.lastBeatOutputCfg = nil
-		return nil
+		return false, nil
 	}

 	expected := unit.Expected()
 	if expected.Config == nil {
 		// should not happen; hard stop
-		return fmt.Errorf("output unit has no config")
+		return false, fmt.Errorf("output unit has no config")
 	}

 	if cm.lastOutputCfg != nil && gproto.Equal(cm.lastOutputCfg, expected.Config) {
 		// configuration for the output did not change; do nothing
 		cm.logger.Debug("Skipped reloading output; configuration didn't change")
-		return nil
+		return false, nil
 	}

 	cm.logger.Debugf("Got output unit config '%s'", expected.Config.GetId())
@@ -686,21 +706,25 @@ func (cm *BeatV2Manager) reloadOutput(unit *client.Unit) error {
 		cm.logger.Info("beat is restarting because output changed")
 		_ = unit.UpdateState(client.UnitStateStopping, "Restarting", nil)
 		cm.Stop()
-		return nil
+		return true, nil
 	}

 	reloadConfig, err := groupByOutputs(expected.Config)
 	if err != nil {
-		return fmt.Errorf("failed to generate config for output: %w", err)
+		return false, fmt.Errorf("failed to generate config for output: %w", err)
 	}

+	// Set those variables regardless of the outcome of output.Reload;
+	// this ensures that if we're on a failed output state and a new
+	// output configuration is sent, the Beat will gracefully exit.
+	cm.lastOutputCfg = expected.Config
+	cm.lastBeatOutputCfg = reloadConfig
+
 	err = output.Reload(reloadConfig)
 	if err != nil {
-		return fmt.Errorf("failed to reload output: %w", err)
+		return false, fmt.Errorf("failed to reload output: %w", err)
 	}

-	cm.lastOutputCfg = expected.Config
-	cm.lastBeatOutputCfg = reloadConfig
-	return nil
+	return false, nil
 }

 func (cm *BeatV2Manager) reloadInputs(inputUnits []*client.Unit) error {
diff --git a/x-pack/libbeat/management/managerV2_test.go b/x-pack/libbeat/management/managerV2_test.go
index aa84bb9f8f0..65e240ec21c 100644
--- a/x-pack/libbeat/management/managerV2_test.go
+++ b/x-pack/libbeat/management/managerV2_test.go
@@ -23,6 +23,7 @@ import (
 	"github.com/elastic/beats/v7/libbeat/common/reload"
 	"github.com/elastic/beats/v7/libbeat/features"
"github.com/elastic/beats/v7/libbeat/tests/integration" ) func TestManagerV2(t *testing.T) { @@ -74,7 +75,7 @@ func TestManagerV2(t *testing.T) { t.Logf("FQDN feature flag set to %v", fqdnEnabled) } - srv := mockSrv([][]*proto.UnitExpected{ + srv := integration.NewMockServer([][]*proto.UnitExpected{ { { Id: "output-unit", @@ -99,7 +100,7 @@ func TestManagerV2(t *testing.T) { Streams: []*proto.Stream{ { Id: "system/metrics-system.filesystem-default-system-1", - Source: requireNewStruct(t, map[string]interface{}{ + Source: integration.RequireNewStruct(t, map[string]interface{}{ "metricsets": []interface{}{"filesystem"}, "period": "1m", }), @@ -120,14 +121,14 @@ func TestManagerV2(t *testing.T) { Streams: []*proto.Stream{ { Id: "system/metrics-system.filesystem-default-system-2", - Source: requireNewStruct(t, map[string]interface{}{ + Source: integration.RequireNewStruct(t, map[string]interface{}{ "metricsets": []interface{}{"filesystem"}, "period": "1m", }), }, { Id: "system/metrics-system.filesystem-default-system-3", - Source: requireNewStruct(t, map[string]interface{}{ + Source: integration.RequireNewStruct(t, map[string]interface{}{ "metricsets": []interface{}{"filesystem"}, "period": "1m", }), @@ -253,7 +254,7 @@ func TestOutputError(t *testing.T) { Id: "default", Type: "mock", Name: "mock", - Source: requireNewStruct(t, + Source: integration.RequireNewStruct(t, map[string]interface{}{ "Is": "this", "required?": "Yes!", @@ -280,7 +281,7 @@ func TestOutputError(t *testing.T) { Id: "default", Type: "mock", Name: "mock", - Source: requireNewStruct(t, + Source: integration.RequireNewStruct(t, map[string]interface{}{ "this": "is", "required": true, @@ -348,59 +349,6 @@ func TestOutputError(t *testing.T) { }, 10*time.Second, 100*time.Millisecond, "desired state, output failed, was not reached") } -func mockSrv( - units [][]*proto.UnitExpected, - featuresIdxs []uint64, - features []*proto.Features, - observedCallback func(*proto.CheckinObserved, int), - delay time.Duration, -) *mock.StubServerV2 { - i := 0 - agentInfo := &proto.CheckinAgentInfo{ - Id: "elastic-agent-id", - Version: "8.6.0", - Snapshot: true, - } - return &mock.StubServerV2{ - CheckinV2Impl: func(observed *proto.CheckinObserved) *proto.CheckinExpected { - if observedCallback != nil { - observedCallback(observed, i) - } - matches := DoesStateMatch(observed, units[i], featuresIdxs[i]) - if !matches { - // send same set of units and features - return &proto.CheckinExpected{ - AgentInfo: agentInfo, - Units: units[i], - Features: features[i], - FeaturesIdx: featuresIdxs[i], - } - } - // delay sending next expected based on delay - if delay > 0 { - <-time.After(delay) - } - // send next set of units and features - i += 1 - if i >= len(units) { - // stay on last index - i = len(units) - 1 - } - return &proto.CheckinExpected{ - AgentInfo: agentInfo, - Units: units[i], - Features: features[i], - FeaturesIdx: featuresIdxs[i], - } - }, - ActionImpl: func(response *proto.ActionResponse) error { - // actions not tested here - return nil - }, - ActionsChan: make(chan *mock.PerformAction, 100), - } -} - type reloadable struct { mx sync.Mutex config *reload.ConfigWithMeta diff --git a/x-pack/metricbeat/docker-compose.yml b/x-pack/metricbeat/docker-compose.yml index 9f7732ff37c..d8631f2053e 100644 --- a/x-pack/metricbeat/docker-compose.yml +++ b/x-pack/metricbeat/docker-compose.yml @@ -24,11 +24,11 @@ services: kibana: # Copied configuration from OSS metricbeat because services with depends_on # cannot be extended with extends - image: 
docker.elastic.co/integrations-ci/beats-kibana:${KIBANA_VERSION:-8.7.0}-1 + image: docker.elastic.co/integrations-ci/beats-kibana:${KIBANA_VERSION:-8.8.1}-1 build: context: ../../metricbeat/module/kibana/_meta args: - KIBANA_VERSION: ${KIBANA_VERSION:-8.7.0} + KIBANA_VERSION: ${KIBANA_VERSION:-8.8.1} depends_on: - elasticsearch ports: diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index a1ba8091da1..3d3fa585ff0 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -1571,10 +1571,10 @@ metricbeat.modules: # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. +# If this option is not defined, the hostname is used. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in a dedicated field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] @@ -1586,7 +1586,7 @@ metricbeat.modules: # env: staging # If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields +# fields in the output document instead of being grouped under a `fields` # sub-dictionary. Default is false. #fields_under_root: false @@ -1598,7 +1598,7 @@ metricbeat.modules: #queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs - # bulk_max_size) to the output, the moment the output is ready to server + # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events. #mem: # Max number of events the queue can buffer. @@ -1650,7 +1650,7 @@ metricbeat.modules: # length of its retry interval each time, up to this maximum. #max_retry_interval: 30s -# Sets the maximum number of CPUs that can be executing simultaneously. The +# Sets the maximum number of CPUs that can execute simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: @@ -1771,7 +1771,7 @@ metricbeat.modules: # ignore_missing: false # fail_on_error: true # -# The following example copies the value of message to message_copied +# The following example copies the value of the message field to message_copied # #processors: # - copy_fields: @@ -1781,7 +1781,7 @@ metricbeat.modules: # fail_on_error: true # ignore_missing: false # -# The following example truncates the value of message to 1024 bytes +# The following example truncates the value of the message field to 1024 bytes # #processors: # - truncate_fields: @@ -1878,7 +1878,7 @@ output.elasticsearch: # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. #index: "metricbeat-%{[agent.version]}" - # Optional ingest pipeline. By default no pipeline will be used. + # Optional ingest pipeline. By default, no pipeline will be used. #pipeline: "" # Optional HTTP path @@ -2609,14 +2609,14 @@ output.elasticsearch: # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag or the `setup` command.
#setup.dashboards.enabled: false # The directory from where to read the dashboards. The default is the `kibana` # folder in the home path. #setup.dashboards.directory: ${path.home}/kibana -# The URL from where to download the dashboards archive. It is used instead of +# The URL from where to download the dashboard archive. It is used instead of # the directory if it has a value. #setup.dashboards.url: @@ -2713,7 +2713,7 @@ setup.template.settings: # Configure index lifecycle management (ILM) to manage the backing indices # of your data streams. -# Enable ILM support. Valid values are true, false. +# Enable ILM support. Valid values are true or false. #setup.ilm.enabled: true # Set the lifecycle policy name. The default policy name is @@ -2868,25 +2868,25 @@ logging.files: # The name of the files where the logs are written to. #name: metricbeat - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated + # Configure log file size limit. If the limit is reached, the log file will be + # automatically rotated. #rotateeverybytes: 10485760 # = 10MB - # Number of rotated log files to keep. Oldest files will be deleted first. + # Number of rotated log files to keep. The oldest files will be deleted first. #keepfiles: 7 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. #permissions: 0600 - # Enable log file rotation on time intervals in addition to size-based rotation. + # Enable log file rotation on time intervals in addition to the size-based rotation. # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h # are boundary-aligned with minutes, hours, days, weeks, months, and years as # reported by the local system clock. All other intervals are calculated from the # Unix epoch. Defaults to disabled. #interval: 0 - # Rotate existing logs on startup rather than appending to the existing + # Rotate existing logs on startup rather than appending them to the existing # file. Defaults to true. # rotateonstartup: true @@ -2914,7 +2914,7 @@ logging.files: # Array of hosts to connect to. # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # In case you specify an additional path, the scheme is required: http://localhost:9200/path # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 #hosts: ["localhost:9200"] @@ -2961,7 +2961,7 @@ logging.files: # Elasticsearch after a network error. The default is 60s. #backoff.max: 60s - # Configure HTTP request timeout before failing an request to Elasticsearch. + # Configure HTTP request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -3058,15 +3058,15 @@ logging.files: # =============================== HTTP Endpoint ================================ -# Each beat can expose internal metrics through a HTTP endpoint. For security +# Each beat can expose internal metrics through an HTTP endpoint. For security # reasons the endpoint is disabled by default. This feature is currently experimental. -# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output # append ?pretty to the URL. # Defines if the HTTP endpoint is enabled.
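For reference, a minimal Go sketch that queries the stats endpoint described above, assuming http.enabled is set to true and the default localhost:5066 shown in this file:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// ?pretty asks the Beat to indent the JSON response, as documented above.
	resp, err := http.Get("http://localhost:5066/stats?pretty")
	if err != nil {
		fmt.Println("stats endpoint not reachable (is http.enabled true?):", err)
		return
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("failed reading response:", err)
		return
	}
	fmt.Println(string(body))
}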
#http.enabled: false -# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe. # When using IP addresses, it is recommended to only use localhost. #http.host: localhost @@ -3076,7 +3076,7 @@ logging.files: # Define which user should be owning the named pipe. #http.named_pipe.user: -# Define which the permissions that should be applied to the named pipe, use the Security +# Define which permissions should be applied to the named pipe; use the Security # Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with # `http.user`. #http.named_pipe.security_descriptor: diff --git a/x-pack/metricbeat/metricbeat.yml b/x-pack/metricbeat/metricbeat.yml index df17ac01dbe..8b974c6c020 100644 --- a/x-pack/metricbeat/metricbeat.yml +++ b/x-pack/metricbeat/metricbeat.yml @@ -33,7 +33,7 @@ setup.template.settings: # all the transactions sent by a single shipper in the web interface. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in a dedicated field with each # transaction published. #tags: ["service-X", "web-tier"] @@ -48,8 +48,8 @@ setup.template.settings: # options here or by using the `setup` command. #setup.dashboards.enabled: false -# The URL from where to download the dashboards archive. By default this URL -# has a value which is computed based on the Beat name and version. For released +# The URL from where to download the dashboard archive. By default, this URL +# has a value that is computed based on the Beat name and version. For released # versions, this URL points to the dashboard archive on the artifacts.elastic.co # website. #setup.dashboards.url: @@ -134,7 +134,7 @@ processors: #logging.level: debug # At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", +# To enable all selectors, use ["*"]. Examples of other selectors are "beat", # "publisher", "service". #logging.selectors: ["*"] @@ -152,7 +152,7 @@ processors: #monitoring.cluster_uuid: # Uncomment to send the metrics to Elasticsearch. Most settings from the -# Elasticsearch output are accepted here as well. +# Elasticsearch outputs are accepted here as well. # Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch # output configuration, so if you have the Elasticsearch output configured such diff --git a/x-pack/metricbeat/module/azure/app_insights/app_insights.go b/x-pack/metricbeat/module/azure/app_insights/app_insights.go index 6790810e2a2..637251be54f 100644 --- a/x-pack/metricbeat/module/azure/app_insights/app_insights.go +++ b/x-pack/metricbeat/module/azure/app_insights/app_insights.go @@ -5,10 +5,9 @@ package app_insights import ( + "fmt" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb/parse" "github.com/elastic/beats/v7/metricbeat/mb" @@ -56,7 +55,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { } client, err := NewClient(config) if err != nil { - return nil, errors.Wrapf(err, "error initializing the monitor client: module azure - %s metricset", metricsetName) + return nil, fmt.Errorf("error initializing the monitor client: module azure - %s metricset: %w", metricsetName, err) } return &MetricSet{ BaseMetricSet: base, @@ -69,7 +68,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(report mb.ReporterV2) error { results, err := m.client.GetMetricValues() if err != nil { - return errors.Wrap(err, "error retrieving metric values") + return fmt.Errorf("error retrieving metric values: %w", err) } events := EventsMapping(results, m.client.Config.ApplicationId, m.client.Config.Namespace) for _, event := range events { diff --git a/x-pack/metricbeat/module/azure/app_insights/client.go b/x-pack/metricbeat/module/azure/app_insights/client.go index 2efdf57ccea..d2bed8fbf0e 100644 --- a/x-pack/metricbeat/module/azure/app_insights/client.go +++ b/x-pack/metricbeat/module/azure/app_insights/client.go @@ -11,7 +11,6 @@ import ( "github.com/gofrs/uuid" "github.com/Azure/azure-sdk-for-go/services/preview/appinsights/v1/insights" - "github.com/pkg/errors" "github.com/elastic/elastic-agent-libs/logp" ) @@ -62,7 +61,7 @@ func (client *Client) GetMetricValues() (insights.ListMetricsResultsItem, error) } id, err := uuid.NewV4() if err != nil { - return result, errors.Wrap(err, "could not generate identifier in client") + return result, fmt.Errorf("could not generate identifier in client: %w", err) } strId := id.String() bodyMetrics = append(bodyMetrics, insights.MetricsPostBodySchema{ID: &strId, Parameters: &bodyMetric}) @@ -72,7 +71,7 @@ func (client *Client) GetMetricValues() (insights.ListMetricsResultsItem, error) if err == nil { return result, nil } - return result, errors.Wrap(err, "could not retrieve app insights metrics from service") + return result, fmt.Errorf("could not retrieve app insights metrics from service: %w", err) } func calculateTimespan(duration time.Duration) *string { diff --git a/x-pack/metricbeat/module/azure/app_insights/client_test.go b/x-pack/metricbeat/module/azure/app_insights/client_test.go index ca62e9a82d2..4763c28da20 100644 --- a/x-pack/metricbeat/module/azure/app_insights/client_test.go +++ b/x-pack/metricbeat/module/azure/app_insights/client_test.go @@ -5,10 +5,10 @@ package app_insights import ( + "errors" "testing" "github.com/Azure/azure-sdk-for-go/services/preview/appinsights/v1/insights" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" ) diff --git a/x-pack/metricbeat/module/azure/azure.go b/x-pack/metricbeat/module/azure/azure.go index 38db7f7c6d7..aa26a0c5705 100644 --- a/x-pack/metricbeat/module/azure/azure.go +++ b/x-pack/metricbeat/module/azure/azure.go @@ -7,8 
+7,6 @@ package azure import ( "fmt" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb" ) @@ -41,7 +39,7 @@ func NewMetricSet(base mb.BaseMetricSet) (*MetricSet, error) { var config Config err := base.Module().UnpackConfig(&config) if err != nil { - return nil, errors.Wrap(err, "error unpack raw module config using UnpackConfig") + return nil, fmt.Errorf("error unpacking raw module config using UnpackConfig: %w", err) } //validate config based on metricset @@ -49,13 +47,13 @@ func NewMetricSet(base mb.BaseMetricSet) (*MetricSet, error) { case nativeMetricset: // resources must be configured for the monitor metricset if len(config.Resources) == 0 { - return nil, errors.Errorf("no resource options defined: module azure - %s metricset", metricsetName) + return nil, fmt.Errorf("no resource options defined: module azure - %s metricset", metricsetName) } default: // validate config resource options entered, no resource queries allowed for the compute_vm and compute_vm_scaleset metricsets for _, resource := range config.Resources { if resource.Query != "" { - return nil, errors.Errorf("error initializing the monitor client: module azure - %s metricset. No queries allowed, please select one of the allowed options", metricsetName) + return nil, fmt.Errorf("error initializing the monitor client: module azure - %s metricset. No queries allowed, please select one of the allowed options", metricsetName) } } // check for lightweight resources if no groups or ids have been entered, if not a new resource is created to check the entire subscription @@ -77,7 +75,7 @@ func NewMetricSet(base mb.BaseMetricSet) (*MetricSet, error) { // instantiate monitor client monitorClient, err := NewClient(config) if err != nil { - return nil, errors.Wrapf(err, "error initializing the monitor client: module azure - %s metricset", metricsetName) + return nil, fmt.Errorf("error initializing the monitor client: module azure - %s metricset: %w", metricsetName, err) } return &MetricSet{ BaseMetricSet: base, @@ -104,7 +102,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { results := m.Client.GetMetricValues(metrics, report) err := EventsMapping(results, m.Client, report) if err != nil { - return errors.Wrap(err, "error running EventsMapping") + return fmt.Errorf("error running EventsMapping: %w", err) } } return nil diff --git a/x-pack/metricbeat/module/azure/billing/billing.go b/x-pack/metricbeat/module/azure/billing/billing.go index a2968d71a6d..4777df5f217 100644 --- a/x-pack/metricbeat/module/azure/billing/billing.go +++ b/x-pack/metricbeat/module/azure/billing/billing.go @@ -127,11 +127,12 @@ func usageIntervalFrom(reference time.Time) (time.Time, time.Time) { // reference time. // // For example, if the reference time is 2007-01-09 09:41:00Z, the forecast period is: +// The forecast data is fetched starting from two days before the reference day and covers the following 30 days.
// - // 2007-01-01T00:00:00Z -> 2007-01-31:59:59Z +// 2007-01-07T00:00:00Z -> 2007-02-05T23:59:59Z func forecastIntervalFrom(reference time.Time) (time.Time, time.Time) { - referenceUTC := reference.UTC() - beginningOfMonth := time.Date(referenceUTC.Year(), referenceUTC.Month(), 1, 0, 0, 0, 0, time.UTC) - endOfMonth := beginningOfMonth.AddDate(0, 1, 0).Add(-1 * time.Second) - return beginningOfMonth, endOfMonth + referenceUTC := reference.UTC().Truncate(24 * time.Hour).Add(-48 * time.Hour) + forecastStartDate := time.Date(referenceUTC.Year(), referenceUTC.Month(), referenceUTC.Day(), 0, 0, 0, 0, time.UTC) + forecastEndDate := forecastStartDate.AddDate(0, 0, 30).Add(-1 * time.Second) + return forecastStartDate, forecastEndDate } diff --git a/x-pack/metricbeat/module/azure/billing/billing_test.go b/x-pack/metricbeat/module/azure/billing/billing_test.go index 65f4482cc5b..e8dc95119b4 100644 --- a/x-pack/metricbeat/module/azure/billing/billing_test.go +++ b/x-pack/metricbeat/module/azure/billing/billing_test.go @@ -32,9 +32,9 @@ func TestForecastPeriodFrom(t *testing.T) { referenceTime, err := time.Parse("2006-01-02 15:04:05", "2007-01-09 09:41:00") assert.NoError(t, err) - expectedStartTime, err := time.Parse("2006-01-02 15:04:05", "2007-01-01 00:00:00") + expectedStartTime, err := time.Parse("2006-01-02 15:04:05", "2007-01-07 00:00:00") assert.NoError(t, err) - expectedEndTime, err := time.Parse("2006-01-02 15:04:05", "2007-01-31 23:59:59") + expectedEndTime, err := time.Parse("2006-01-02 15:04:05", "2007-02-05 23:59:59") assert.NoError(t, err) actualStartTime, actualEndTime := forecastIntervalFrom(referenceTime) diff --git a/x-pack/metricbeat/module/enterprisesearch/health/data.go b/x-pack/metricbeat/module/enterprisesearch/health/data.go index c2620bff247..318f9fa0598 100644 --- a/x-pack/metricbeat/module/enterprisesearch/health/data.go +++ b/x-pack/metricbeat/module/enterprisesearch/health/data.go @@ -6,9 +6,10 @@ package health import ( "encoding/json" + "errors" + "fmt" "github.com/joeshaw/multierror" - "github.com/pkg/errors" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -125,7 +126,7 @@ func eventMapping(report mb.ReporterV2, input []byte, isXpack bool) error { event.MetricSetFields, err = schema.Apply(data) if err != nil { - errs = append(errs, errors.Wrap(err, "failure to apply health schema")) + errs = append(errs, fmt.Errorf("failure to apply health schema: %w", err)) } else { report.Event(event) } diff --git a/x-pack/metricbeat/module/enterprisesearch/health/health.go b/x-pack/metricbeat/module/enterprisesearch/health/health.go index d263314bf95..a8de3c90bc3 100644 --- a/x-pack/metricbeat/module/enterprisesearch/health/health.go +++ b/x-pack/metricbeat/module/enterprisesearch/health/health.go @@ -5,7 +5,7 @@ package health import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/helper" @@ -71,12 +71,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(report mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } err = eventMapping(report, content, m.XPackEnabled) if err != nil { - return errors.Wrap(err, "error converting event") + return fmt.Errorf("error converting event: %w", err) } return nil diff --git a/x-pack/metricbeat/module/enterprisesearch/stats/data.go 
b/x-pack/metricbeat/module/enterprisesearch/stats/data.go index ea20ded1745..cd1ebd13dc3 100644 --- a/x-pack/metricbeat/module/enterprisesearch/stats/data.go +++ b/x-pack/metricbeat/module/enterprisesearch/stats/data.go @@ -6,9 +6,10 @@ package stats import ( "encoding/json" + "errors" + "fmt" "github.com/joeshaw/multierror" - "github.com/pkg/errors" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -195,7 +196,7 @@ func eventMapping(report mb.ReporterV2, input []byte, isXpack bool) error { event.MetricSetFields, err = schema.Apply(data) if err != nil { - errs = append(errs, errors.Wrap(err, "failure to apply stats schema")) + errs = append(errs, fmt.Errorf("failure to apply stats schema: %w", err)) } else { report.Event(event) } diff --git a/x-pack/metricbeat/module/enterprisesearch/stats/stats.go b/x-pack/metricbeat/module/enterprisesearch/stats/stats.go index c59b3481af4..234a65d0d11 100644 --- a/x-pack/metricbeat/module/enterprisesearch/stats/stats.go +++ b/x-pack/metricbeat/module/enterprisesearch/stats/stats.go @@ -5,7 +5,7 @@ package stats import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/metricbeat/helper" @@ -71,12 +71,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(report mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } err = eventMapping(report, content, m.XPackEnabled) if err != nil { - return errors.Wrap(err, "error converting event") + return fmt.Errorf("error converting event: %w", err) } return nil diff --git a/x-pack/metricbeat/module/iis/application_pool/reader.go b/x-pack/metricbeat/module/iis/application_pool/reader.go index 4ce1f240381..77d377a75d6 100644 --- a/x-pack/metricbeat/module/iis/application_pool/reader.go +++ b/x-pack/metricbeat/module/iis/application_pool/reader.go @@ -7,14 +7,13 @@ package application_pool import ( + "fmt" "strings" "github.com/elastic/beats/v7/metricbeat/helper/windows/pdh" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/go-sysinfo" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/elastic-agent-libs/logp" ) @@ -77,7 +76,7 @@ func newReader(config Config) (*Reader, error) { err := r.initAppPools() if err != nil { - return nil, errors.Wrap(err, "error loading counters for existing app pools") + return nil, fmt.Errorf("error loading counters for existing app pools: %w", err) } return r, nil } @@ -86,7 +85,7 @@ func newReader(config Config) (*Reader, error) { func (r *Reader) initAppPools() error { apps, err := getApplicationPools(r.config.Names) if err != nil { - return errors.Wrap(err, "failed retrieving running worker processes") + return fmt.Errorf("failed retrieving running worker processes: %w", err) } r.applicationPools = apps if len(apps) == 0 { @@ -116,14 +115,14 @@ func (r *Reader) initAppPools() error { } for _, v := range childQueries { if err := r.query.AddCounter(v, "", "float", len(childQueries) > 1); err != nil { - return errors.Wrapf(err, `failed to add counter (query="%v")`, v) + return fmt.Errorf(`failed to add counter (query="%v"): %w`, v, err) } r.workerProcesses[v] = key } } err = r.query.RemoveUnusedCounters(newQueries) if err != nil { - return errors.Wrap(err, "failed removing unused counter values") + return fmt.Errorf("failed removing unused counter 
values: %w", err) } return nil } @@ -138,14 +137,14 @@ func (r *Reader) read() ([]mb.Event, error) { // Some counters, such as rate counters, require two counter values in order to compute a displayable value. In this case we must call PdhCollectQueryData twice before calling PdhGetFormattedCounterValue. // For more information, see Collecting Performance Data (https://docs.microsoft.com/en-us/windows/desktop/PerfCtrs/collecting-performance-data). if err := r.query.CollectData(); err != nil { - return nil, errors.Wrap(err, "failed querying counter values") + return nil, fmt.Errorf("failed querying counter values: %w", err) } // Get the values. values, err := r.query.GetFormattedCounterValues() if err != nil { r.close() - return nil, errors.Wrap(err, "failed formatting counter values") + return nil, fmt.Errorf("failed formatting counter values: %w", err) } var events []mb.Event eventGroup := r.mapEvents(values) diff --git a/x-pack/metricbeat/module/iis/test/integration.go b/x-pack/metricbeat/module/iis/test/integration.go index ede53c9406f..525bc3c1003 100644 --- a/x-pack/metricbeat/module/iis/test/integration.go +++ b/x-pack/metricbeat/module/iis/test/integration.go @@ -8,8 +8,10 @@ package test import ( + "errors" + "fmt" + "github.com/StackExchange/wmi" - "github.com/pkg/errors" ) // Service struct used to map Win32_Service @@ -37,7 +39,7 @@ func EnsureIISIsRunning() error { return errors.New("IIS is not not installed") } if ser[0].State != "Running" { - return errors.Errorf("IIS is installed but status is %s", ser[0].State) + return fmt.Errorf("IIS is installed but status is %s", ser[0].State) } return nil } diff --git a/x-pack/metricbeat/module/mssql/connection.go b/x-pack/metricbeat/module/mssql/connection.go index fa4c0fa36ae..ac328491ddb 100644 --- a/x-pack/metricbeat/module/mssql/connection.go +++ b/x-pack/metricbeat/module/mssql/connection.go @@ -6,24 +6,23 @@ package mssql import ( "database/sql" + "fmt" // Register driver. _ "github.com/denisenkom/go-mssqldb" - - "github.com/pkg/errors" ) // NewConnection returns a connection already established with MSSQL func NewConnection(uri string) (*sql.DB, error) { db, err := sql.Open("sqlserver", uri) if err != nil { - return nil, errors.Wrap(err, "could not create db instance") + return nil, fmt.Errorf("could not create db instance: %w", err) } // Check the connection before executing all queries to reduce the number // of connection errors that we might encounter. 
if err = db.Ping(); err != nil { - err = errors.Wrap(err, "error doing ping to db") + err = fmt.Errorf("error doing ping to db: %w", err) } return db, err diff --git a/x-pack/metricbeat/module/mssql/performance/data_integration_test.go b/x-pack/metricbeat/module/mssql/performance/data_integration_test.go index 498fdc1d0de..0d3d1d8fc78 100644 --- a/x-pack/metricbeat/module/mssql/performance/data_integration_test.go +++ b/x-pack/metricbeat/module/mssql/performance/data_integration_test.go @@ -5,11 +5,11 @@ package performance import ( + "errors" "net/url" "testing" _ "github.com/denisenkom/go-mssqldb" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" diff --git a/x-pack/metricbeat/module/mssql/transaction_log/transaction_log.go b/x-pack/metricbeat/module/mssql/transaction_log/transaction_log.go index 2fb7c43e0d2..821b5db5d08 100644 --- a/x-pack/metricbeat/module/mssql/transaction_log/transaction_log.go +++ b/x-pack/metricbeat/module/mssql/transaction_log/transaction_log.go @@ -8,8 +8,6 @@ import ( "database/sql" "fmt" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/x-pack/metricbeat/module/mssql" "github.com/elastic/elastic-agent-libs/logp" @@ -58,7 +56,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { db, err := mssql.NewConnection(base.HostData().URI) if err != nil { - return nil, errors.Wrap(err, "could not create connection to db") + return nil, fmt.Errorf("could not create connection to db: %w", err) } return &MetricSet{ @@ -131,7 +129,7 @@ func (m *MetricSet) getLogSpaceUsageForDb(dbName string) (mapstr.M, error) { &res.logSpaceInBytesSinceLastBackup); err != nil { // Because this query only returns a single result an error in the first scan is // probably a "data returned but not properly scanned" - err = errors.Wrap(err, "error scanning single result") + err = fmt.Errorf("error scanning single result: %w", err) return nil, err } @@ -156,7 +154,7 @@ func (m *MetricSet) getLogStats(db dbInfo) (mapstr.M, error) { if err := row.Scan(&res.databaseID, &res.sizeMB, &res.activeSizeMB, &res.backupTime, &res.sinceLastBackupMB, &res.sinceLastCheckpointMB, &res.recoverySizeMB); err != nil { // Because this query only returns a single result an error in the first scan is // probably a "data returned but not properly scanned" - err = errors.Wrap(err, "error scanning single result") + err = fmt.Errorf("error scanning single result: %w", err) return nil, err } @@ -189,14 +187,14 @@ func (m *MetricSet) getDbsNames() ([]dbInfo, error) { var rows *sql.Rows rows, err := m.db.Query("SELECT name, database_id FROM sys.databases") if err != nil { - return nil, errors.Wrap(err, "error doing query 'SELECT name, database_id FROM sys.databases'") + return nil, fmt.Errorf("error doing query 'SELECT name, database_id FROM sys.databases': %w", err) } defer closeRows(rows) for rows.Next() { var row dbInfo if err = rows.Scan(&row.name, &row.id); err != nil { - return nil, errors.Wrap(err, "error scanning row results") + return nil, fmt.Errorf("error scanning row results: %w", err) } res = append(res, row) diff --git a/x-pack/metricbeat/module/oracle/performance/buffer_cache_hit_ratio.go b/x-pack/metricbeat/module/oracle/performance/buffer_cache_hit_ratio.go index 0b0fb8ad41c..fc0db9d7859 100644 --- a/x-pack/metricbeat/module/oracle/performance/buffer_cache_hit_ratio.go +++ b/x-pack/metricbeat/module/oracle/performance/buffer_cache_hit_ratio.go @@ -7,8 +7,7 @@ package performance 
import ( "context" "database/sql" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/x-pack/metricbeat/module/oracle" "github.com/elastic/elastic-agent-libs/mapstr" @@ -35,7 +34,7 @@ func (e *performanceExtractor) bufferCacheHitRatio(ctx context.Context) ([]buffe 1 - (physical_reads / (db_block_gets + consistent_gets)) "Hit Ratio" FROM V$BUFFER_POOL_STATISTICS`) if err != nil { - return nil, errors.Wrap(err, "error executing query") + return nil, fmt.Errorf("error executing query: %w", err) } results := make([]bufferCacheHitRatio, 0) diff --git a/x-pack/metricbeat/module/oracle/performance/cursors.go b/x-pack/metricbeat/module/oracle/performance/cursors.go index ebc5a1df61d..d29b16b7815 100644 --- a/x-pack/metricbeat/module/oracle/performance/cursors.go +++ b/x-pack/metricbeat/module/oracle/performance/cursors.go @@ -7,8 +7,7 @@ package performance import ( "context" "database/sql" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/x-pack/metricbeat/module/oracle" "github.com/elastic/elastic-agent-libs/mapstr" @@ -56,7 +55,7 @@ func (e *performanceExtractor) cursorsByUsernameAndMachine(ctx context.Context) s.machine ORDER BY 1 DESC`) if err != nil { - return nil, errors.Wrap(err, "error executing query") + return nil, fmt.Errorf("error executing query: %w", err) } results := make([]cursorsByUsernameAndMachine, 0) diff --git a/x-pack/metricbeat/module/oracle/performance/data.go b/x-pack/metricbeat/module/oracle/performance/data.go index f24bfd86969..1004fac720a 100644 --- a/x-pack/metricbeat/module/oracle/performance/data.go +++ b/x-pack/metricbeat/module/oracle/performance/data.go @@ -6,8 +6,7 @@ package performance import ( "context" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" ) @@ -17,19 +16,19 @@ func (m *MetricSet) extract(ctx context.Context, extractor performanceExtractMet out = &extractedData{} if out.bufferCacheHitRatios, err = extractor.bufferCacheHitRatio(ctx); err != nil { - return nil, errors.Wrap(err, "error getting buffer cache hit ratio") + return nil, fmt.Errorf("error getting buffer cache hit ratio: %w", err) } if out.libraryData, err = extractor.libraryCache(ctx); err != nil { - return nil, errors.Wrap(err, "error getting libraryCache data") + return nil, fmt.Errorf("error getting libraryCache data: %w", err) } if out.cursorsByUsernameAndMachine, err = extractor.cursorsByUsernameAndMachine(ctx); err != nil { - return nil, errors.Wrap(err, "error getting cursors by username and machine") + return nil, fmt.Errorf("error getting cursors by username and machine: %w", err) } if out.totalCursors, err = extractor.totalCursors(ctx); err != nil { - return nil, errors.Wrap(err, "error getting total cursors") + return nil, fmt.Errorf("error getting total cursors: %w", err) } return @@ -40,7 +39,7 @@ func (m *MetricSet) extract(ctx context.Context, extractor performanceExtractMet func (m *MetricSet) extractAndTransform(ctx context.Context) ([]mb.Event, error) { extractedMetricsData, err := m.extract(ctx, m.extractor) if err != nil { - return nil, errors.Wrap(err, "error extracting data") + return nil, fmt.Errorf("error extracting data: %w", err) } return m.transform(extractedMetricsData), nil diff --git a/x-pack/metricbeat/module/oracle/performance/library_cache.go b/x-pack/metricbeat/module/oracle/performance/library_cache.go index fec3b1b4a7f..fdb7a01abdf 100644 --- a/x-pack/metricbeat/module/oracle/performance/library_cache.go +++ b/x-pack/metricbeat/module/oracle/performance/library_cache.go @@ -7,8 +7,7 @@ package 
performance import ( "context" "database/sql" - - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/x-pack/metricbeat/module/oracle" "github.com/elastic/elastic-agent-libs/mapstr" @@ -36,7 +35,7 @@ func (e *performanceExtractor) libraryCache(ctx context.Context) ([]libraryCache UNION SELECT 'io_reloads' "Ratio", (SUM(reloads) / SUM(pins)) FROM V$LIBRARYCACHE`) if err != nil { - return nil, errors.Wrap(err, "error executing query") + return nil, fmt.Errorf("error executing query: %w", err) } results := make([]libraryCache, 0) diff --git a/x-pack/metricbeat/module/oracle/tablespace/data.go b/x-pack/metricbeat/module/oracle/tablespace/data.go index 526c65e85b7..30c86d766da 100644 --- a/x-pack/metricbeat/module/oracle/tablespace/data.go +++ b/x-pack/metricbeat/module/oracle/tablespace/data.go @@ -6,12 +6,11 @@ package tablespace import ( "context" + "fmt" "github.com/elastic/beats/v7/x-pack/metricbeat/module/oracle" "github.com/elastic/elastic-agent-libs/mapstr" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/mb" ) @@ -21,15 +20,15 @@ func (m *MetricSet) extract(ctx context.Context, extractor tablespaceExtractMeth out = &extractedData{} if out.dataFiles, err = extractor.dataFilesData(ctx); err != nil { - return nil, errors.Wrap(err, "error getting data_files") + return nil, fmt.Errorf("error getting data_files: %w", err) } if out.tempFreeSpace, err = extractor.tempFreeSpaceData(ctx); err != nil { - return nil, errors.Wrap(err, "error getting temp_free_space") + return nil, fmt.Errorf("error getting temp_free_space: %w", err) } if out.freeSpace, err = extractor.usedAndFreeSpaceData(ctx); err != nil { - return nil, errors.Wrap(err, "error getting free space data") + return nil, fmt.Errorf("error getting free space data: %w", err) } return @@ -54,7 +53,7 @@ func (m *MetricSet) transform(in *extractedData) (out map[string]mapstr.M) { func (m *MetricSet) extractAndTransform(ctx context.Context) ([]mb.Event, error) { extractedMetricsData, err := m.extract(ctx, m.extractor) if err != nil { - return nil, errors.Wrap(err, "error extracting data") + return nil, fmt.Errorf("error extracting data: %w", err) } out := m.transform(extractedMetricsData) diff --git a/x-pack/metricbeat/module/oracle/tablespace/data_files.go b/x-pack/metricbeat/module/oracle/tablespace/data_files.go index 73df14b2ca1..be1eba9349d 100644 --- a/x-pack/metricbeat/module/oracle/tablespace/data_files.go +++ b/x-pack/metricbeat/module/oracle/tablespace/data_files.go @@ -8,8 +8,6 @@ import ( "context" "database/sql" "fmt" - - "github.com/pkg/errors" ) type dataFile struct { @@ -34,7 +32,7 @@ func (d *dataFile) eventKey() string { func (e *tablespaceExtractor) dataFilesData(ctx context.Context) ([]dataFile, error) { rows, err := e.db.QueryContext(ctx, "SELECT FILE_NAME, FILE_ID, TABLESPACE_NAME, BYTES, STATUS, MAXBYTES, USER_BYTES, ONLINE_STATUS FROM SYS.DBA_DATA_FILES UNION SELECT FILE_NAME, FILE_ID, TABLESPACE_NAME, BYTES, STATUS, MAXBYTES, USER_BYTES, STATUS AS ONLINE_STATUS FROM SYS.DBA_TEMP_FILES") if err != nil { - return nil, errors.Wrap(err, "error executing query") + return nil, fmt.Errorf("error executing query: %w", err) } results := make([]dataFile, 0) diff --git a/x-pack/metricbeat/module/oracle/tablespace/mocks_test.go b/x-pack/metricbeat/module/oracle/tablespace/mocks_test.go index a88b030a29f..12348236bcc 100644 --- a/x-pack/metricbeat/module/oracle/tablespace/mocks_test.go +++ b/x-pack/metricbeat/module/oracle/tablespace/mocks_test.go @@ -7,8 +7,7 @@ package tablespace import ( "context" 
"database/sql" - - "github.com/pkg/errors" + "errors" ) // happyMockExtractor is a tablespaceExtractMethods implementor that follow and ideal happy path on the entire set of data diff --git a/x-pack/metricbeat/module/oracle/tablespace/temp_free_space.go b/x-pack/metricbeat/module/oracle/tablespace/temp_free_space.go index 8bf50e24568..efc8401e2c0 100644 --- a/x-pack/metricbeat/module/oracle/tablespace/temp_free_space.go +++ b/x-pack/metricbeat/module/oracle/tablespace/temp_free_space.go @@ -7,8 +7,7 @@ package tablespace import ( "context" "database/sql" - - "github.com/pkg/errors" + "fmt" ) type tempFreeSpace struct { @@ -29,7 +28,7 @@ func (d *tempFreeSpace) eventKey() string { func (e *tablespaceExtractor) tempFreeSpaceData(ctx context.Context) ([]tempFreeSpace, error) { rows, err := e.db.QueryContext(ctx, "SELECT TABLESPACE_NAME, TABLESPACE_SIZE, ALLOCATED_SPACE, FREE_SPACE FROM DBA_TEMP_FREE_SPACE") if err != nil { - return nil, errors.Wrap(err, "error executing query") + return nil, fmt.Errorf("error executing query: %w", err) } results := make([]tempFreeSpace, 0) diff --git a/x-pack/metricbeat/module/oracle/tablespace/used_and_free_space.go b/x-pack/metricbeat/module/oracle/tablespace/used_and_free_space.go index eec7197dbb4..b17b249808d 100644 --- a/x-pack/metricbeat/module/oracle/tablespace/used_and_free_space.go +++ b/x-pack/metricbeat/module/oracle/tablespace/used_and_free_space.go @@ -7,8 +7,7 @@ package tablespace import ( "context" "database/sql" - - "github.com/pkg/errors" + "fmt" ) type usedAndFreeSpace struct { @@ -28,7 +27,7 @@ func (d *usedAndFreeSpace) eventKey() string { func (e *tablespaceExtractor) usedAndFreeSpaceData(ctx context.Context) ([]usedAndFreeSpace, error) { rows, err := e.db.QueryContext(ctx, "SELECT b.tablespace_name, tbs_size used, a.free_space free FROM (SELECT tablespace_name, sum(bytes) AS free_space FROM dba_free_space GROUP BY tablespace_name) a, (SELECT tablespace_name, sum(bytes) AS tbs_size FROM dba_data_files GROUP BY tablespace_name) b WHERE a.tablespace_name(+)=b.tablespace_name") if err != nil { - return nil, errors.Wrap(err, "error executing query") + return nil, fmt.Errorf("error executing query: %w", err) } results := make([]usedAndFreeSpace, 0) diff --git a/x-pack/metricbeat/module/stan/_meta/Dockerfile b/x-pack/metricbeat/module/stan/_meta/Dockerfile index 030427a7c00..3f707023d1a 100644 --- a/x-pack/metricbeat/module/stan/_meta/Dockerfile +++ b/x-pack/metricbeat/module/stan/_meta/Dockerfile @@ -2,17 +2,17 @@ ARG STAN_VERSION=0.15.1 FROM nats-streaming:$STAN_VERSION # build stage -FROM golang:1.13-alpine3.11 AS build-env -RUN apk --no-cache add build-base git mercurial gcc -RUN cd src && go get -d github.com/nats-io/stan.go/ -RUN cd src/github.com/nats-io/stan.go/examples/stan-bench && git checkout tags/v0.5.2 && go build . +FROM golang:1.20.7 AS build-env +RUN apt-get install git mercurial gcc +RUN git clone https://github.com/nats-io/stan.go.git /stan-go +RUN cd /stan-go/examples/stan-bench && git checkout tags/v0.5.2 && go build . 
# create an enhanced container with nc command available since nats is based # on scratch image making healthcheck impossible FROM alpine:latest RUN apk add --no-cache --upgrade bash COPY --from=0 nats-streaming-server /nats-streaming-server -COPY --from=build-env /go/src/github.com/nats-io/stan.go/examples/stan-bench/stan-bench /stan-bench +COPY --from=build-env /stan-go/examples/stan-bench/stan-bench /stan-bench # Expose client, management, and cluster ports EXPOSE 4222 8222 ADD healthcheck.sh /healthcheck.sh diff --git a/x-pack/metricbeat/module/stan/channels/channels.go b/x-pack/metricbeat/module/stan/channels/channels.go index d2824943ccc..ab3ef50fd9d 100644 --- a/x-pack/metricbeat/module/stan/channels/channels.go +++ b/x-pack/metricbeat/module/stan/channels/channels.go @@ -5,7 +5,7 @@ package channels import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -68,10 +68,10 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) (err error) { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } if err = eventsMapping(content, r); err != nil { - return errors.Wrap(err, "error in mapping") + return fmt.Errorf("error in mapping: %w", err) } return nil diff --git a/x-pack/metricbeat/module/stan/channels/data.go b/x-pack/metricbeat/module/stan/channels/data.go index 9ac80ce63d4..25d5d51f392 100644 --- a/x-pack/metricbeat/module/stan/channels/data.go +++ b/x-pack/metricbeat/module/stan/channels/data.go @@ -6,8 +6,7 @@ package channels import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -66,12 +65,12 @@ type Channels struct { func eventMapping(content map[string]interface{}) (mb.Event, error) { fields, err := channelSchema.Apply(content) if err != nil { - return mb.Event{}, errors.Wrap(err, "error applying channels schema") + return mb.Event{}, fmt.Errorf("error applying channels schema: %w", err) } moduleFields, err := moduleSchema.Apply(content) if err != nil { - return mb.Event{}, errors.Wrap(err, "error applying module schema") + return mb.Event{}, fmt.Errorf("error applying module schema: %w", err) } event := mb.Event{ @@ -85,7 +84,7 @@ func eventMapping(content map[string]interface{}) (mb.Event, error) { func eventsMapping(content []byte, r mb.ReporterV2) error { channelsIn := Channels{} if err := json.Unmarshal(content, &channelsIn); err != nil { - return errors.Wrap(err, "error unmarshaling Nats streaming channels response to JSON") + return fmt.Errorf("error unmarshaling Nats streaming channels response to JSON: %w", err) } for _, ch := range channelsIn.Channels { @@ -109,7 +108,7 @@ func eventsMapping(content []byte, r mb.ReporterV2) error { } if evt, err = eventMapping(chWrapper); err != nil { - r.Error(errors.Wrap(err, "error mapping channel to its schema")) + r.Error(fmt.Errorf("error mapping channel to its schema: %w", err)) continue } if !r.Event(evt) { diff --git a/x-pack/metricbeat/module/stan/stats/data.go b/x-pack/metricbeat/module/stan/stats/data.go index 569a201f53d..d73549e2655 100644 --- a/x-pack/metricbeat/module/stan/stats/data.go +++ b/x-pack/metricbeat/module/stan/stats/data.go @@ -6,8 +6,7 @@ package stats import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" s "github.com/elastic/beats/v7/libbeat/common/schema" 
c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -37,17 +36,17 @@ var ( func eventMapping(content []byte, r mb.ReporterV2) error { var streaming = make(map[string]interface{}) if err := json.Unmarshal(content, &streaming); err != nil { - return errors.Wrap(err, "error in streaming server mapping") + return fmt.Errorf("error in streaming server mapping: %w", err) } fields, err := clientsSchema.Apply(streaming) if err != nil { - return errors.Wrap(err, "error parsing Nats streaming server API response") + return fmt.Errorf("error parsing Nats streaming server API response: %w", err) } moduleFields, err := moduleSchema.Apply(streaming) if err != nil { - return errors.Wrap(err, "error applying module schema") + return fmt.Errorf("error applying module schema: %w", err) } event := mb.Event{ MetricSetFields: fields, diff --git a/x-pack/metricbeat/module/stan/stats/stats.go b/x-pack/metricbeat/module/stan/stats/stats.go index c83f27cb6e8..05a1c9760e2 100644 --- a/x-pack/metricbeat/module/stan/stats/stats.go +++ b/x-pack/metricbeat/module/stan/stats/stats.go @@ -5,7 +5,7 @@ package stats import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -66,11 +66,11 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) error { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } err = eventMapping(content, r) if err != nil { - return errors.Wrap(err, "error in mapping") + return fmt.Errorf("error in mapping: %w", err) } return nil diff --git a/x-pack/metricbeat/module/stan/subscriptions/data.go b/x-pack/metricbeat/module/stan/subscriptions/data.go index 2135c68fca0..16f8c80ecc2 100644 --- a/x-pack/metricbeat/module/stan/subscriptions/data.go +++ b/x-pack/metricbeat/module/stan/subscriptions/data.go @@ -6,8 +6,7 @@ package subscriptions import ( "encoding/json" - - "github.com/pkg/errors" + "fmt" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -39,12 +38,12 @@ var ( func eventMapping(content map[string]interface{}) (mb.Event, error) { fields, err := subscriptionsSchema.Apply(content) if err != nil { - return mb.Event{}, errors.Wrap(err, "error applying subscription schema") + return mb.Event{}, fmt.Errorf("error applying subscription schema: %w", err) } moduleFields, err := moduleSchema.Apply(content) if err != nil { - return mb.Event{}, errors.Wrap(err, "error applying module schema") + return mb.Event{}, fmt.Errorf("error applying module schema: %w", err) } event := mb.Event{ @@ -88,7 +87,7 @@ func eventsMapping(content []byte, r mb.ReporterV2) error { var err error channels := Channels{} if err = json.Unmarshal(content, &channels); err != nil { - return errors.Wrap(err, "error unmarshaling Nats streaming channels detailed response to JSON") + return fmt.Errorf("error unmarshaling Nats streaming channels detailed response to JSON: %w", err) } for _, ch := range channels.Channels { @@ -99,7 +98,7 @@ func eventsMapping(content []byte, r mb.ReporterV2) error { sub["cluster_id"] = channels.ClusterID evt, err = eventMapping(sub) if err != nil { - r.Error(errors.Wrap(err, "error mapping subscription event")) + r.Error(fmt.Errorf("error mapping subscription event: %w", err)) continue } diff --git a/x-pack/metricbeat/module/stan/subscriptions/subscriptions.go 
b/x-pack/metricbeat/module/stan/subscriptions/subscriptions.go index 428db923de1..457912b4a5f 100644 --- a/x-pack/metricbeat/module/stan/subscriptions/subscriptions.go +++ b/x-pack/metricbeat/module/stan/subscriptions/subscriptions.go @@ -5,7 +5,7 @@ package subscriptions import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" @@ -68,10 +68,10 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(r mb.ReporterV2) (err error) { content, err := m.http.FetchContent() if err != nil { - return errors.Wrap(err, "error in fetch") + return fmt.Errorf("error in fetch: %w", err) } if err = eventsMapping(content, r); err != nil { - return errors.Wrap(err, "error in mapping") + return fmt.Errorf("error in mapping: %w", err) } return nil diff --git a/x-pack/metricbeat/module/syncgateway/syncgateway.go b/x-pack/metricbeat/module/syncgateway/syncgateway.go index dfd2e16cb84..4d261faa190 100644 --- a/x-pack/metricbeat/module/syncgateway/syncgateway.go +++ b/x-pack/metricbeat/module/syncgateway/syncgateway.go @@ -6,11 +6,10 @@ package syncgateway import ( "encoding/json" + "fmt" "sync" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" ) @@ -76,7 +75,7 @@ func (m *module) GetSyncgatewayResponse(http *helper.HTTP) (*SgResponse, error) input := SgResponse{} if err = json.Unmarshal(byt, &input); err != nil { - return nil, errors.Wrap(err, "error unmarshalling JSON of SyncGateway expvar response") + return nil, fmt.Errorf("error unmarshalling JSON of SyncGateway expvar response: %w", err) } m.expvarCache.cachedData = input diff --git a/x-pack/metricbeat/modules.d/activemq.yml.disabled b/x-pack/metricbeat/modules.d/activemq.yml.disabled index 33716db01c9..de0ecb7c79f 100644 --- a/x-pack/metricbeat/modules.d/activemq.yml.disabled +++ b/x-pack/metricbeat/modules.d/activemq.yml.disabled @@ -1,5 +1,5 @@ # Module: activemq -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-activemq.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-activemq.html - module: activemq metricsets: ['broker', 'queue', 'topic'] diff --git a/x-pack/metricbeat/modules.d/airflow.yml.disabled b/x-pack/metricbeat/modules.d/airflow.yml.disabled index 010b1daadd8..e874fcf7db0 100644 --- a/x-pack/metricbeat/modules.d/airflow.yml.disabled +++ b/x-pack/metricbeat/modules.d/airflow.yml.disabled @@ -1,5 +1,5 @@ # Module: airflow -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-airflow.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-airflow.html - module: airflow host: "localhost" diff --git a/x-pack/metricbeat/modules.d/aws.yml.disabled b/x-pack/metricbeat/modules.d/aws.yml.disabled index 3fc7c43a108..ddd36a4c326 100644 --- a/x-pack/metricbeat/modules.d/aws.yml.disabled +++ b/x-pack/metricbeat/modules.d/aws.yml.disabled @@ -1,5 +1,5 @@ # Module: aws -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-aws.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-aws.html - module: aws period: 1m diff --git a/x-pack/metricbeat/modules.d/awsfargate.yml.disabled b/x-pack/metricbeat/modules.d/awsfargate.yml.disabled index b2b91f06ee4..81c34f5759d 100644 --- a/x-pack/metricbeat/modules.d/awsfargate.yml.disabled +++ b/x-pack/metricbeat/modules.d/awsfargate.yml.disabled @@ -1,5 +1,5 
@@ # Module: awsfargate -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-awsfargate.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-awsfargate.html - module: awsfargate period: 10s diff --git a/x-pack/metricbeat/modules.d/azure.yml.disabled b/x-pack/metricbeat/modules.d/azure.yml.disabled index 10d00e003cf..e42f064618a 100644 --- a/x-pack/metricbeat/modules.d/azure.yml.disabled +++ b/x-pack/metricbeat/modules.d/azure.yml.disabled @@ -1,5 +1,5 @@ # Module: azure -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-azure.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-azure.html - module: azure metricsets: diff --git a/x-pack/metricbeat/modules.d/cloudfoundry.yml.disabled b/x-pack/metricbeat/modules.d/cloudfoundry.yml.disabled index 22a600e51d8..e082545a78d 100644 --- a/x-pack/metricbeat/modules.d/cloudfoundry.yml.disabled +++ b/x-pack/metricbeat/modules.d/cloudfoundry.yml.disabled @@ -1,5 +1,5 @@ # Module: cloudfoundry -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-cloudfoundry.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-cloudfoundry.html - module: cloudfoundry metricsets: diff --git a/x-pack/metricbeat/modules.d/cockroachdb.yml.disabled b/x-pack/metricbeat/modules.d/cockroachdb.yml.disabled index 5b0a48e86bb..198fb66f8d8 100644 --- a/x-pack/metricbeat/modules.d/cockroachdb.yml.disabled +++ b/x-pack/metricbeat/modules.d/cockroachdb.yml.disabled @@ -1,5 +1,5 @@ # Module: cockroachdb -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-cockroachdb.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-cockroachdb.html - module: cockroachdb metricsets: ['status'] diff --git a/x-pack/metricbeat/modules.d/containerd.yml.disabled b/x-pack/metricbeat/modules.d/containerd.yml.disabled index f21b32139eb..20b03cd9e50 100644 --- a/x-pack/metricbeat/modules.d/containerd.yml.disabled +++ b/x-pack/metricbeat/modules.d/containerd.yml.disabled @@ -1,5 +1,5 @@ # Module: containerd -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-containerd.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-containerd.html - module: containerd metricsets: ["cpu", "memory", "blkio"] diff --git a/x-pack/metricbeat/modules.d/coredns.yml.disabled b/x-pack/metricbeat/modules.d/coredns.yml.disabled index 644a62bc4b7..60e8b71c32c 100644 --- a/x-pack/metricbeat/modules.d/coredns.yml.disabled +++ b/x-pack/metricbeat/modules.d/coredns.yml.disabled @@ -1,5 +1,5 @@ # Module: coredns -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-coredns.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-coredns.html - module: coredns metricsets: ["stats"] diff --git a/x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled b/x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled index e42dde843c2..0af7916573a 100644 --- a/x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled +++ b/x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled @@ -1,5 +1,5 @@ # Module: enterprisesearch -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-enterprisesearch.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-enterprisesearch.html - module: enterprisesearch 
xpack.enabled: true diff --git a/x-pack/metricbeat/modules.d/enterprisesearch.yml.disabled b/x-pack/metricbeat/modules.d/enterprisesearch.yml.disabled index 241791cc203..122e56b627b 100644 --- a/x-pack/metricbeat/modules.d/enterprisesearch.yml.disabled +++ b/x-pack/metricbeat/modules.d/enterprisesearch.yml.disabled @@ -1,5 +1,5 @@ # Module: enterprisesearch -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-enterprisesearch.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-enterprisesearch.html - module: enterprisesearch metricsets: ["health", "stats"] diff --git a/x-pack/metricbeat/modules.d/gcp.yml.disabled b/x-pack/metricbeat/modules.d/gcp.yml.disabled index 4a42e04b311..f79e1607a45 100644 --- a/x-pack/metricbeat/modules.d/gcp.yml.disabled +++ b/x-pack/metricbeat/modules.d/gcp.yml.disabled @@ -1,5 +1,5 @@ # Module: gcp -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-gcp.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-gcp.html - module: gcp metricsets: diff --git a/x-pack/metricbeat/modules.d/ibmmq.yml.disabled b/x-pack/metricbeat/modules.d/ibmmq.yml.disabled index a2fdf552f1c..43940532263 100644 --- a/x-pack/metricbeat/modules.d/ibmmq.yml.disabled +++ b/x-pack/metricbeat/modules.d/ibmmq.yml.disabled @@ -1,5 +1,5 @@ # Module: ibmmq -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-ibmmq.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-ibmmq.html - module: ibmmq metricsets: ['qmgr'] diff --git a/x-pack/metricbeat/modules.d/iis.yml.disabled b/x-pack/metricbeat/modules.d/iis.yml.disabled index f81d67eedff..19f348a2875 100644 --- a/x-pack/metricbeat/modules.d/iis.yml.disabled +++ b/x-pack/metricbeat/modules.d/iis.yml.disabled @@ -1,5 +1,5 @@ # Module: iis -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-iis.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-iis.html - module: iis metricsets: diff --git a/x-pack/metricbeat/modules.d/istio.yml.disabled b/x-pack/metricbeat/modules.d/istio.yml.disabled index 55c2a1d715a..ccb0884610a 100644 --- a/x-pack/metricbeat/modules.d/istio.yml.disabled +++ b/x-pack/metricbeat/modules.d/istio.yml.disabled @@ -1,5 +1,5 @@ # Module: istio -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-istio.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-istio.html # Istio mesh. To collect all Mixer-generated metrics. For versions of Istio prior to 1.5. 
- module: istio diff --git a/x-pack/metricbeat/modules.d/mssql.yml.disabled b/x-pack/metricbeat/modules.d/mssql.yml.disabled index 12eff0522ee..fbbb7bad8fc 100644 --- a/x-pack/metricbeat/modules.d/mssql.yml.disabled +++ b/x-pack/metricbeat/modules.d/mssql.yml.disabled @@ -1,5 +1,5 @@ # Module: mssql -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-mssql.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-mssql.html - module: mssql metricsets: diff --git a/x-pack/metricbeat/modules.d/oracle.yml.disabled b/x-pack/metricbeat/modules.d/oracle.yml.disabled index 99d59eb2c3a..445924b61ea 100644 --- a/x-pack/metricbeat/modules.d/oracle.yml.disabled +++ b/x-pack/metricbeat/modules.d/oracle.yml.disabled @@ -1,5 +1,5 @@ # Module: oracle -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-oracle.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-oracle.html # Module: oracle diff --git a/x-pack/metricbeat/modules.d/prometheus.yml.disabled b/x-pack/metricbeat/modules.d/prometheus.yml.disabled index d6e00936b2a..11cc449ba47 100644 --- a/x-pack/metricbeat/modules.d/prometheus.yml.disabled +++ b/x-pack/metricbeat/modules.d/prometheus.yml.disabled @@ -1,5 +1,5 @@ # Module: prometheus -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-prometheus.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-prometheus.html - module: prometheus period: 10s diff --git a/x-pack/metricbeat/modules.d/redisenterprise.yml.disabled b/x-pack/metricbeat/modules.d/redisenterprise.yml.disabled index c3121d7c2fb..350843a88e9 100644 --- a/x-pack/metricbeat/modules.d/redisenterprise.yml.disabled +++ b/x-pack/metricbeat/modules.d/redisenterprise.yml.disabled @@ -1,5 +1,5 @@ # Module: redisenterprise -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-redisenterprise.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-redisenterprise.html - module: redisenterprise metricsets: diff --git a/x-pack/metricbeat/modules.d/sql.yml.disabled b/x-pack/metricbeat/modules.d/sql.yml.disabled index 5663e03b1ef..f45644b0b11 100644 --- a/x-pack/metricbeat/modules.d/sql.yml.disabled +++ b/x-pack/metricbeat/modules.d/sql.yml.disabled @@ -1,5 +1,5 @@ # Module: sql -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-sql.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-sql.html - module: sql metricsets: diff --git a/x-pack/metricbeat/modules.d/stan.yml.disabled b/x-pack/metricbeat/modules.d/stan.yml.disabled index 0f93c0f5a0c..b3f19229874 100644 --- a/x-pack/metricbeat/modules.d/stan.yml.disabled +++ b/x-pack/metricbeat/modules.d/stan.yml.disabled @@ -1,5 +1,5 @@ # Module: stan -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-stan.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-stan.html - module: stan metricsets: ["stats", "subscriptions", "channels"] diff --git a/x-pack/metricbeat/modules.d/statsd.yml.disabled b/x-pack/metricbeat/modules.d/statsd.yml.disabled index ced946c242f..16712fd96b3 100644 --- a/x-pack/metricbeat/modules.d/statsd.yml.disabled +++ b/x-pack/metricbeat/modules.d/statsd.yml.disabled @@ -1,5 +1,5 @@ # Module: statsd -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-statsd.html +# Docs: 
https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-statsd.html - module: statsd host: "localhost" diff --git a/x-pack/metricbeat/modules.d/syncgateway.yml.disabled b/x-pack/metricbeat/modules.d/syncgateway.yml.disabled index 54c42a11809..f37b367c959 100644 --- a/x-pack/metricbeat/modules.d/syncgateway.yml.disabled +++ b/x-pack/metricbeat/modules.d/syncgateway.yml.disabled @@ -1,5 +1,5 @@ # Module: syncgateway -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-syncgateway.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-syncgateway.html - module: syncgateway metricsets: diff --git a/x-pack/metricbeat/modules.d/tomcat.yml.disabled b/x-pack/metricbeat/modules.d/tomcat.yml.disabled index 58a9a4038f7..623f5a888d5 100644 --- a/x-pack/metricbeat/modules.d/tomcat.yml.disabled +++ b/x-pack/metricbeat/modules.d/tomcat.yml.disabled @@ -1,5 +1,5 @@ # Module: tomcat -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/master/metricbeat-module-tomcat.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-tomcat.html - module: tomcat metricsets: ['threading', 'cache', 'memory', 'requests'] diff --git a/x-pack/osquerybeat/_meta/config/beat.docker.yml.tmpl b/x-pack/osquerybeat/_meta/config/beat.docker.yml.tmpl index fdf633c24a1..c10222b6335 100644 --- a/x-pack/osquerybeat/_meta/config/beat.docker.yml.tmpl +++ b/x-pack/osquerybeat/_meta/config/beat.docker.yml.tmpl @@ -12,5 +12,5 @@ osquerybeat: # ============================== Process Security ============================== # Disable seccomp system call filtering on Linux. -# Otherwise osquerybeat can't fork osqueryd with error: Failed to start osqueryd process: fork/exec ./osqueryd: operation not permitted +# Otherwise osquerybeat can't fork osqueryd with the error: Failed to start osqueryd process: fork/exec ./osqueryd: operation not permitted seccomp.enabled: false diff --git a/x-pack/osquerybeat/_meta/config/beat.reference.yml.tmpl b/x-pack/osquerybeat/_meta/config/beat.reference.yml.tmpl index fdf633c24a1..c10222b6335 100644 --- a/x-pack/osquerybeat/_meta/config/beat.reference.yml.tmpl +++ b/x-pack/osquerybeat/_meta/config/beat.reference.yml.tmpl @@ -12,5 +12,5 @@ osquerybeat: # ============================== Process Security ============================== # Disable seccomp system call filtering on Linux. -# Otherwise osquerybeat can't fork osqueryd with error: Failed to start osqueryd process: fork/exec ./osqueryd: operation not permitted +# Otherwise osquerybeat can't fork osqueryd with the error: Failed to start osqueryd process: fork/exec ./osqueryd: operation not permitted seccomp.enabled: false diff --git a/x-pack/osquerybeat/_meta/config/beat.yml.tmpl b/x-pack/osquerybeat/_meta/config/beat.yml.tmpl index fdf633c24a1..c10222b6335 100644 --- a/x-pack/osquerybeat/_meta/config/beat.yml.tmpl +++ b/x-pack/osquerybeat/_meta/config/beat.yml.tmpl @@ -12,5 +12,5 @@ osquerybeat: # ============================== Process Security ============================== # Disable seccomp system call filtering on Linux. 
-# Otherwise osquerybeat can't fork osqueryd with error: Failed to start osqueryd process: fork/exec ./osqueryd: operation not permitted +# Otherwise osquerybeat can't fork osqueryd with the error: Failed to start osqueryd process: fork/exec ./osqueryd: operation not permitted seccomp.enabled: false diff --git a/x-pack/osquerybeat/ext/osquery-extension/pkg/tables/elastic_file_analysis.go b/x-pack/osquerybeat/ext/osquery-extension/pkg/tables/elastic_file_analysis.go index f531a8d6233..d5616a039aa 100644 --- a/x-pack/osquerybeat/ext/osquery-extension/pkg/tables/elastic_file_analysis.go +++ b/x-pack/osquerybeat/ext/osquery-extension/pkg/tables/elastic_file_analysis.go @@ -17,8 +17,9 @@ import ( "strings" "syscall" - "github.com/elastic/beats/v7/x-pack/osquerybeat/internal/command" "github.com/osquery/osquery-go/plugin/table" + + "github.com/elastic/beats/v7/x-pack/osquerybeat/internal/command" ) func ExecuteStderr(ctx context.Context, name string, arg ...string) (out string, err error) { diff --git a/x-pack/osquerybeat/osquerybeat.reference.yml b/x-pack/osquerybeat/osquerybeat.reference.yml index 8fc40a03532..416462f3f47 100644 --- a/x-pack/osquerybeat/osquerybeat.reference.yml +++ b/x-pack/osquerybeat/osquerybeat.reference.yml @@ -12,17 +12,17 @@ osquerybeat: # ============================== Process Security ============================== # Disable seccomp system call filtering on Linux. -# Otherwise osquerybeat can't fork osqueryd with error: Failed to start osqueryd process: fork/exec ./osqueryd: operation not permitted +# Otherwise osquerybeat can't fork osqueryd with the error: Failed to start osqueryd process: fork/exec ./osqueryd: operation not permitted seccomp.enabled: false # ================================== General =================================== # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. +# If this option is not defined, the hostname is used. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their own field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] @@ -34,7 +34,7 @@ seccomp.enabled: false # env: staging # If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields +# fields in the output document instead of being grouped under a `fields` # sub-dictionary. Default is false. #fields_under_root: false @@ -46,7 +46,7 @@ seccomp.enabled: false #queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs - # bulk_max_size) to the output, the moment the output is ready to server + # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events. #mem: # Max number of events the queue can buffer. @@ -98,7 +98,7 @@ seccomp.enabled: false # length of its retry interval each time, up to this maximum. #max_retry_interval: 30s -# Sets the maximum number of CPUs that can be executing simultaneously. The +# Sets the maximum number of CPUs that can execute simultaneously. The # default is the number of logical CPUs available in the system.
#max_procs: @@ -219,7 +219,7 @@ seccomp.enabled: false # ignore_missing: false # fail_on_error: true # -# The following example copies the value of message to message_copied +# The following example copies the value of the message field to message_copied # #processors: # - copy_fields: @@ -229,7 +229,7 @@ seccomp.enabled: false # fail_on_error: true # ignore_missing: false # -# The following example truncates the value of message to 1024 bytes +# The following example truncates the value of the message field to 1024 bytes # #processors: # - truncate_fields: @@ -326,7 +326,7 @@ output.elasticsearch: # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. #index: "osquerybeat-%{[agent.version]}" - # Optional ingest pipeline. By default no pipeline will be used. + # Optional ingest pipeline. By default, no pipeline will be used. #pipeline: "" # Optional HTTP path @@ -653,14 +653,14 @@ output.elasticsearch: # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag or the `setup` command. +# options here or by using the `-setup` CLI flag or the `setup` command. #setup.dashboards.enabled: false # The directory from where to read the dashboards. The default is the `kibana` # folder in the home path. #setup.dashboards.directory: ${path.home}/kibana -# The URL from where to download the dashboards archive. It is used instead of +# The URL from where to download the dashboard archive. It is used instead of # the directory if it has a value. #setup.dashboards.url: @@ -757,7 +757,7 @@ setup.template.settings: # Configure index lifecycle management (ILM) to manage the backing indices # of your data streams. -# Enable ILM support. Valid values are true, false. +# Enable ILM support. Valid values are true or false. #setup.ilm.enabled: true # Set the lifecycle policy name. The default policy name is @@ -912,25 +912,25 @@ logging.files: # The name of the files where the logs are written to. #name: osquerybeat - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated + # Configure log file size limit. If the limit is reached, the log file will be + # automatically rotated. #rotateeverybytes: 10485760 # = 10MB - # Number of rotated log files to keep. Oldest files will be deleted first. + # Number of rotated log files to keep. The oldest files will be deleted first. #keepfiles: 7 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. #permissions: 0600 - # Enable log file rotation on time intervals in addition to size-based rotation. + # Enable log file rotation on time intervals in addition to size-based rotation. # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h # are boundary-aligned with minutes, hours, days, weeks, months, and years as # reported by the local system clock. All other intervals are calculated from the # Unix epoch. Defaults to disabled. #interval: 0 - # Rotate existing logs on startup rather than appending to the existing + # Rotate existing logs on startup rather than appending to the existing # file. Defaults to true. # rotateonstartup: true @@ -958,7 +958,7 @@ logging.files: # Array of hosts to connect to.
# Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # In case you specify an additional path, the scheme is required: http://localhost:9200/path # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 #hosts: ["localhost:9200"] @@ -1005,7 +1005,7 @@ logging.files: # Elasticsearch after a network error. The default is 60s. #backoff.max: 60s - # Configure HTTP request timeout before failing an request to Elasticsearch. + # Configure HTTP request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -1102,15 +1102,15 @@ logging.files: # =============================== HTTP Endpoint ================================ -# Each beat can expose internal metrics through a HTTP endpoint. For security +# Each beat can expose internal metrics through an HTTP endpoint. For security # reasons the endpoint is disabled by default. This feature is currently experimental. -# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output # append ?pretty to the URL. # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe. # When using IP addresses, it is recommended to only use localhost. #http.host: localhost @@ -1120,7 +1120,7 @@ logging.files: # Define which user should be owning the named pipe. #http.named_pipe.user: -# Define which the permissions that should be applied to the named pipe, use the Security +# Define which permissions should be applied to the named pipe; use the Security # Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with # `http.user`. #http.named_pipe.security_descriptor: diff --git a/x-pack/osquerybeat/osquerybeat.yml b/x-pack/osquerybeat/osquerybeat.yml index abb1954f0ef..f2341dab994 100644 --- a/x-pack/osquerybeat/osquerybeat.yml +++ b/x-pack/osquerybeat/osquerybeat.yml @@ -12,7 +12,7 @@ osquerybeat: # ============================== Process Security ============================== # Disable seccomp system call filtering on Linux. -# Otherwise osquerybeat can't fork osqueryd with error: Failed to start osqueryd process: fork/exec ./osqueryd: operation not permitted +# Otherwise osquerybeat can't fork osqueryd with the error: Failed to start osqueryd process: fork/exec ./osqueryd: operation not permitted seccomp.enabled: false # ================================== General =================================== @@ -21,7 +21,7 @@ seccomp.enabled: false # all the transactions sent by a single shipper in the web interface. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their own field with each # transaction published. #tags: ["service-X", "web-tier"] @@ -36,8 +36,8 @@ seccomp.enabled: false # options here or by using the `setup` command. #setup.dashboards.enabled: false -# The URL from where to download the dashboards archive. By default this URL -# has a value which is computed based on the Beat name and version. For released +# The URL from where to download the dashboard archive. By default, this URL +# has a value that is computed based on the Beat name and version. 
For released # versions, this URL points to the dashboard archive on the artifacts.elastic.co # website. #setup.dashboards.url: @@ -120,7 +120,7 @@ processors: #logging.level: debug # At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", +# To enable all selectors, use ["*"]. Examples of other selectors are "beat", # "publisher", "service". #logging.selectors: ["*"] @@ -138,7 +138,7 @@ processors: #monitoring.cluster_uuid: # Uncomment to send the metrics to Elasticsearch. Most settings from the -# Elasticsearch output are accepted here as well. +# Elasticsearch output are accepted here as well. # Note that the settings should point to your Elasticsearch *monitoring* cluster. # Any setting that is not set is automatically inherited from the Elasticsearch # output configuration, so if you have the Elasticsearch output configured such diff --git a/x-pack/packetbeat/packetbeat.reference.yml b/x-pack/packetbeat/packetbeat.reference.yml index 736c0754893..7442b7f6a0f 100644 --- a/x-pack/packetbeat/packetbeat.reference.yml +++ b/x-pack/packetbeat/packetbeat.reference.yml @@ -15,7 +15,7 @@ # to sniff on the device carrying the default route. packetbeat.interfaces.device: any -# The network CIDR blocks that are considered "internal" networks for +# The network CIDR blocks that are considered "internal" networks for # the purpose of network perimeter boundary classification. The valid # values for internal_networks are the same as those that can be used # with processor network conditions. @@ -39,7 +39,7 @@ packetbeat.interfaces.internal_networks: #packetbeat.interfaces.snaplen: 65535 # The maximum size of the shared memory buffer to use between the kernel and -# user space. A bigger buffer usually results in lower CPU usage, but consumes +# user space. A bigger buffer usually results in lower CPU usage but consumes # more memory. This setting is only available for the af_packet sniffer type. # The default is 30 MB. #packetbeat.interfaces.buffer_size_mb: 30 @@ -58,23 +58,23 @@ packetbeat.interfaces.internal_networks: # The value must be between 0 and 65535. By default, no value is set. # # This is only available on Linux and requires using `type: af_packet`. Each process -# must be running in same network namespace. All processes must use the same +# must be running in the same network namespace. All processes must use the same # interface settings. You must take responsibility for running multiple instances # of Packetbeat. #packetbeat.interfaces.fanout_group: ~ # Packetbeat automatically generates a BPF for capturing only the traffic on -# ports where it expects to find known protocols. Use this settings to tell +# ports where it expects to find known protocols. Use this setting to tell # Packetbeat to generate a BPF filter that accepts VLAN tags. #packetbeat.interfaces.with_vlans: true # Use this setting to override the automatically generated BPF filter. #packetbeat.interfaces.bpf_filter: -# With `auto_promisc_mode` Packetbeat puts interface in promiscuous mode automatically on startup. +# With `auto_promisc_mode` Packetbeat puts the interface in promiscuous mode automatically on startup. # This option does not work with `any` interface device. # The default option is false and requires manual set-up of promiscuous mode. 
-# Warning: under some circumstances (e.g beat crash) promiscuous mode +# Warning: under some circumstances (e.g., beat crash) promiscuous mode # can stay enabled even after beat is shut down. #packetbeat.interfaces.auto_promisc_mode: true @@ -130,7 +130,7 @@ packetbeat.protocols: # Default: false #parse_arguments: false - # Hide all methods relative to connection negotiation between server and + # Hide all methods relative to connection negotiation between the server and # client. # Default: true #hide_connection_information: true @@ -162,7 +162,7 @@ packetbeat.protocols: #send_request: true # If this option is enabled, the raw message of the response (`cassandra_request.request_headers` field) - # is included in published events. The default is true. enable `send_request` first before enable this option. + # is included in published events. The default is true. Enable `send_request` before enabling this option. #send_request_header: true # If this option is enabled, the raw message of the response (`cassandra_response` field) @@ -170,7 +170,7 @@ packetbeat.protocols: #send_response: true # If this option is enabled, the raw message of the response (`cassandra_response.response_headers` field) - # is included in published events. The default is true. enable `send_response` first before enable this option. + # is included in published events. The default is true. Enable `send_response` before enabling this option. #send_response_header: true # Set to true to publish fields with null values in events. @@ -237,8 +237,8 @@ packetbeat.protocols: # the HTTP protocol by commenting out the list of ports. ports: [80, 8080, 8000, 5000, 8002] - # Uncomment the following to hide certain parameters in URL or forms attached - # to HTTP requests. The names of the parameters are case insensitive. + # Uncomment the following to hide certain parameters in the URL or forms attached + # to HTTP requests. The names of the parameters are case-insensitive. # The value of the parameters will be replaced with the 'xxxxx' string. # This is generally useful for avoiding storing user passwords or other # sensitive information. @@ -642,10 +642,10 @@ packetbeat.ignore_outgoing: false # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. +# If this option is not defined, the hostname is used. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their own field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] @@ -657,7 +657,7 @@ packetbeat.ignore_outgoing: false # env: staging # If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields +# fields in the output document instead of being grouped under a `fields` # sub-dictionary. Default is false. #fields_under_root: false @@ -669,7 +669,7 @@ packetbeat.ignore_outgoing: false #queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs - # bulk_max_size) to the output, the moment the output is ready to server + # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events. #mem: # Max number of events the queue can buffer. 
@@ -721,7 +721,7 @@ packetbeat.ignore_outgoing: false # length of its retry interval each time, up to this maximum. #max_retry_interval: 30s -# Sets the maximum number of CPUs that can be executing simultaneously. The +# Sets the maximum number of CPUs that can execute simultaneously. The # default is the number of logical CPUs available in the system. #max_procs: @@ -842,7 +842,7 @@ packetbeat.ignore_outgoing: false # ignore_missing: false # fail_on_error: true # -# The following example copies the value of message to message_copied +# The following example copies the value of the message field to message_copied # #processors: # - copy_fields: @@ -852,7 +852,7 @@ packetbeat.ignore_outgoing: false # fail_on_error: true # ignore_missing: false # -# The following example truncates the value of message to 1024 bytes +# The following example truncates the value of the message field to 1024 bytes # #processors: # - truncate_fields: @@ -949,7 +949,7 @@ output.elasticsearch: # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. #index: "packetbeat-%{[agent.version]}" - # Optional ingest pipeline. By default no pipeline will be used. + # Optional ingest pipeline. By default, no pipeline will be used. #pipeline: "" # Optional HTTP path @@ -1680,14 +1680,14 @@ output.elasticsearch: # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag or the `setup` command. +# options here or by using the `-setup` CLI flag or the `setup` command. #setup.dashboards.enabled: false # The directory from where to read the dashboards. The default is the `kibana` # folder in the home path. #setup.dashboards.directory: ${path.home}/kibana -# The URL from where to download the dashboards archive. It is used instead of +# The URL from where to download the dashboard archive. It is used instead of # the directory if it has a value. #setup.dashboards.url: @@ -1784,7 +1784,7 @@ setup.template.settings: # Configure index lifecycle management (ILM) to manage the backing indices # of your data streams. -# Enable ILM support. Valid values are true, false. +# Enable ILM support. Valid values are true or false. #setup.ilm.enabled: true # Set the lifecycle policy name. The default policy name is @@ -1939,25 +1939,25 @@ logging.files: # The name of the files where the logs are written to. #name: packetbeat - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated + # Configure log file size limit. If the limit is reached, the log file will be + # automatically rotated. #rotateeverybytes: 10485760 # = 10MB - # Number of rotated log files to keep. Oldest files will be deleted first. + # Number of rotated log files to keep. The oldest files will be deleted first. #keepfiles: 7 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. #permissions: 0600 - # Enable log file rotation on time intervals in addition to size-based rotation. + # Enable log file rotation on time intervals in addition to size-based rotation. # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h # are boundary-aligned with minutes, hours, days, weeks, months, and years as # reported by the local system clock. All other intervals are calculated from the # Unix epoch. 
Defaults to disabled. #interval: 0 - # Rotate existing logs on startup rather than appending to the existing + # Rotate existing logs on startup rather than appending to the existing # file. Defaults to true. # rotateonstartup: true @@ -1985,7 +1985,7 @@ logging.files: # Array of hosts to connect to. # Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # In case you specify an additional path, the scheme is required: http://localhost:9200/path # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 #hosts: ["localhost:9200"] @@ -2032,7 +2032,7 @@ logging.files: # Elasticsearch after a network error. The default is 60s. #backoff.max: 60s - # Configure HTTP request timeout before failing an request to Elasticsearch. + # Configure HTTP request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -2129,15 +2129,15 @@ logging.files: # =============================== HTTP Endpoint ================================ -# Each beat can expose internal metrics through a HTTP endpoint. For security +# Each beat can expose internal metrics through an HTTP endpoint. For security # reasons the endpoint is disabled by default. This feature is currently experimental. -# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output # append ?pretty to the URL. # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe. # When using IP addresses, it is recommended to only use localhost. #http.host: localhost @@ -2147,7 +2147,7 @@ logging.files: # Define which user should be owning the named pipe. #http.named_pipe.user: -# Define which the permissions that should be applied to the named pipe, use the Security +# Define which permissions should be applied to the named pipe; use the Security # Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with # `http.user`. #http.named_pipe.security_descriptor: diff --git a/x-pack/packetbeat/packetbeat.yml b/x-pack/packetbeat/packetbeat.yml index 34f4f370875..cfe15388a35 100644 --- a/x-pack/packetbeat/packetbeat.yml +++ b/x-pack/packetbeat/packetbeat.yml @@ -168,8 +168,8 @@ setup.template.settings: # options here or by using the `setup` command. #setup.dashboards.enabled: false -# The URL from where to download the dashboards archive. By default this URL -# has a value which is computed based on the Beat name and version. For released +# The URL from where to download the dashboard archive. By default, this URL +# has a value that is computed based on the Beat name and version. For released # versions, this URL points to the dashboard archive on the artifacts.elastic.co # website. #setup.dashboards.url: @@ -262,7 +262,7 @@ processors: #logging.level: debug # At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", +# To enable all selectors, use ["*"]. Examples of other selectors are "beat", # "publisher", "service". #logging.selectors: ["*"] @@ -280,7 +280,7 @@ processors: #monitoring.cluster_uuid: # Uncomment to send the metrics to Elasticsearch. 
Most settings from the -# Elasticsearch output are accepted here as well. +# Elasticsearch output are accepted here as well. # Note that the settings should point to your Elasticsearch *monitoring* cluster. # Any setting that is not set is automatically inherited from the Elasticsearch # output configuration, so if you have the Elasticsearch output configured such diff --git a/x-pack/winlogbeat/module/powershell/ingest/powershell.yml b/x-pack/winlogbeat/module/powershell/ingest/powershell.yml index 2c3a3a3e85c..34f537bce36 100644 --- a/x-pack/winlogbeat/module/powershell/ingest/powershell.yml +++ b/x-pack/winlogbeat/module/powershell/ingest/powershell.yml @@ -232,7 +232,7 @@ processors: field: param3 source: |- def parseRawDetail(String raw) { - Pattern detailRegex = /^([^:(]+)\((.+)\)\:\s*(.+)?$/; + Pattern detailRegex = /^([^:(]+)\(([^)]+)\)\:\s*(.+)?$/; Pattern parameterBindingRegex = /name\=(.+);\s*value\=(.+)$/; def matcher = detailRegex.matcher(raw); diff --git a/x-pack/winlogbeat/module/powershell/ingest/powershell_operational.yml b/x-pack/winlogbeat/module/powershell/ingest/powershell_operational.yml index cb7830ed304..db36f6aed99 100644 --- a/x-pack/winlogbeat/module/powershell/ingest/powershell_operational.yml +++ b/x-pack/winlogbeat/module/powershell/ingest/powershell_operational.yml @@ -284,7 +284,7 @@ processors: field: Payload source: |- def parseRawDetail(String raw) { - Pattern detailRegex = /^(.+)\((.+)\)\:\s*(.+)?$/; + Pattern detailRegex = /^([^:(]+)\(([^)]+)\)\:\s*(.+)?$/; Pattern parameterBindingRegex = /name\=(.+);\s*value\=(.+)$/; def matcher = detailRegex.matcher(raw); diff --git a/x-pack/winlogbeat/winlogbeat.reference.yml b/x-pack/winlogbeat/winlogbeat.reference.yml index a375f22540a..3c6799e7329 100644 --- a/x-pack/winlogbeat/winlogbeat.reference.yml +++ b/x-pack/winlogbeat/winlogbeat.reference.yml @@ -60,10 +60,10 @@ winlogbeat.event_logs: # The name of the shipper that publishes the network data. It can be used to group # all the transactions sent by a single shipper in the web interface. -# If this options is not defined, the hostname is used. +# If this option is not defined, the hostname is used. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their own field with each # transaction published. Tags make it easy to group servers by different # logical properties. #tags: ["service-X", "web-tier"] @@ -75,7 +75,7 @@ winlogbeat.event_logs: # env: staging # If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields +# fields in the output document instead of being grouped under a `fields` # sub-dictionary. Default is false. #fields_under_root: false @@ -87,7 +87,7 @@ winlogbeat.event_logs: #queue: # Queue type by name (default 'mem') # The memory queue will present all available events (up to the outputs - # bulk_max_size) to the output, the moment the output is ready to server + # bulk_max_size) to the output, the moment the output is ready to serve # another batch of events. #mem: # Max number of events the queue can buffer. @@ -139,7 +139,7 @@ winlogbeat.event_logs: # length of its retry interval each time, up to this maximum. #max_retry_interval: 30s -# Sets the maximum number of CPUs that can be executing simultaneously. The +# Sets the maximum number of CPUs that can execute simultaneously. The # default is the number of logical CPUs available in the system. 
#max_procs: @@ -260,7 +260,7 @@ winlogbeat.event_logs: # ignore_missing: false # fail_on_error: true # -# The following example copies the value of message to message_copied +# The following example copies the value of the message field to message_copied # #processors: # - copy_fields: @@ -270,7 +270,7 @@ winlogbeat.event_logs: # fail_on_error: true # ignore_missing: false # -# The following example truncates the value of message to 1024 bytes +# The following example truncates the value of the message field to 1024 bytes # #processors: # - truncate_fields: @@ -367,7 +367,7 @@ output.elasticsearch: # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. #index: "winlogbeat-%{[agent.version]}" - # Optional ingest pipeline. By default no pipeline will be used. + # Optional ingest pipeline. By default, no pipeline will be used. #pipeline: "" # Optional HTTP path @@ -1098,14 +1098,14 @@ output.elasticsearch: # These settings control loading the sample dashboards to the Kibana index. Loading # the dashboards are disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag or the `setup` command. +# options here or by using the `-setup` CLI flag or the `setup` command. #setup.dashboards.enabled: false # The directory from where to read the dashboards. The default is the `kibana` # folder in the home path. #setup.dashboards.directory: ${path.home}/kibana -# The URL from where to download the dashboards archive. It is used instead of +# The URL from where to download the dashboard archive. It is used instead of # the directory if it has a value. #setup.dashboards.url: @@ -1202,7 +1202,7 @@ setup.template.settings: # Configure index lifecycle management (ILM) to manage the backing indices # of your data streams. -# Enable ILM support. Valid values are true, false. +# Enable ILM support. Valid values are true or false. #setup.ilm.enabled: true # Set the lifecycle policy name. The default policy name is @@ -1357,25 +1357,25 @@ logging.files: # The name of the files where the logs are written to. #name: winlogbeat - # Configure log file size limit. If limit is reached, log file will be - # automatically rotated + # Configure log file size limit. If the limit is reached, the log file will be + # automatically rotated. #rotateeverybytes: 10485760 # = 10MB - # Number of rotated log files to keep. Oldest files will be deleted first. + # Number of rotated log files to keep. The oldest files will be deleted first. #keepfiles: 7 # The permissions mask to apply when rotating log files. The default value is 0600. # Must be a valid Unix-style file permissions mask expressed in octal notation. #permissions: 0600 - # Enable log file rotation on time intervals in addition to size-based rotation. + # Enable log file rotation on time intervals in addition to size-based rotation. # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h # are boundary-aligned with minutes, hours, days, weeks, months, and years as # reported by the local system clock. All other intervals are calculated from the # Unix epoch. Defaults to disabled. #interval: 0 - # Rotate existing logs on startup rather than appending to the existing + # Rotate existing logs on startup rather than appending to the existing # file. Defaults to true. # rotateonstartup: true @@ -1403,7 +1403,7 @@ logging.files: # Array of hosts to connect to. 
# Scheme and port can be left out and will be set to the default (http and 9200) - # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # In case you specify an additional path, the scheme is required: http://localhost:9200/path # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 #hosts: ["localhost:9200"] @@ -1450,7 +1450,7 @@ logging.files: # Elasticsearch after a network error. The default is 60s. #backoff.max: 60s - # Configure HTTP request timeout before failing an request to Elasticsearch. + # Configure HTTP request timeout before failing a request to Elasticsearch. #timeout: 90 # Use SSL settings for HTTPS. @@ -1547,15 +1547,15 @@ logging.files: # =============================== HTTP Endpoint ================================ -# Each beat can expose internal metrics through a HTTP endpoint. For security +# Each beat can expose internal metrics through an HTTP endpoint. For security # reasons the endpoint is disabled by default. This feature is currently experimental. -# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# Stats can be accessed through http://localhost:5066/stats. For pretty JSON output # append ?pretty to the URL. # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# The HTTP endpoint will bind to this hostname, IP address, unix socket, or named pipe. # When using IP addresses, it is recommended to only use localhost. #http.host: localhost @@ -1565,7 +1565,7 @@ logging.files: # Define which user should be owning the named pipe. #http.named_pipe.user: -# Define which the permissions that should be applied to the named pipe, use the Security +# Define which permissions should be applied to the named pipe; use the Security # Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with # `http.user`. #http.named_pipe.security_descriptor: diff --git a/x-pack/winlogbeat/winlogbeat.yml b/x-pack/winlogbeat/winlogbeat.yml index 6ae52cd3f3a..bf7d2f819eb 100644 --- a/x-pack/winlogbeat/winlogbeat.yml +++ b/x-pack/winlogbeat/winlogbeat.yml @@ -53,7 +53,7 @@ setup.template.settings: # all the transactions sent by a single shipper in the web interface. #name: -# The tags of the shipper are included in their own field with each +# The tags of the shipper are included in their own field with each # transaction published. #tags: ["service-X", "web-tier"] @@ -68,8 +68,8 @@ setup.template.settings: # options here or by using the `setup` command. #setup.dashboards.enabled: false -# The URL from where to download the dashboards archive. By default this URL -# has a value which is computed based on the Beat name and version. For released +# The URL from where to download the dashboard archive. By default, this URL +# has a value that is computed based on the Beat name and version. For released # versions, this URL points to the dashboard archive on the artifacts.elastic.co # website. #setup.dashboards.url: @@ -152,7 +152,7 @@ processors: #logging.level: debug # At debug level, you can selectively enable logging only for some components. -# To enable all selectors use ["*"]. Examples of other selectors are "beat", +# To enable all selectors, use ["*"]. Examples of other selectors are "beat", # "publisher", "service". #logging.selectors: ["*"] @@ -170,7 +170,7 @@ processors: #monitoring.cluster_uuid: # Uncomment to send the metrics to Elasticsearch. 
Most settings from the -# Elasticsearch output are accepted here as well. +# Elasticsearch output are accepted here as well. # Note that the settings should point to your Elasticsearch *monitoring* cluster. # Any setting that is not set is automatically inherited from the Elasticsearch # output configuration, so if you have the Elasticsearch output configured such
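Note on the PowerShell pipeline change above: the tightened detailRegex makes parseRawDetail cut the method name at the first "(" and the argument at the first ")", so detail lines whose values themselves contain "):" no longer confuse the greedy groups. A minimal before/after sketch, written in Python for illustration only (the pipelines run Painless, and the sample raw string below is hypothetical):

import re

# Previous pattern from powershell_operational.yml and the tightened replacement
# now used by both pipelines.
OLD = re.compile(r"^(.+)\((.+)\)\:\s*(.+)?$")
NEW = re.compile(r"^([^:(]+)\(([^)]+)\)\:\s*(.+)?$")

# Hypothetical detail line; real payloads come from PowerShell event logs.
raw = "ParameterBinding(Get-Item): name=Path; value=(secret): hidden"

print(OLD.match(raw).groups())
# ('ParameterBinding(Get-Item): name=Path; value=', 'secret', 'hidden')
# -> the greedy groups latch onto the "):" inside the value.

print(NEW.match(raw).groups())
# ('ParameterBinding', 'Get-Item', 'name=Path; value=(secret): hidden')
# -> method and argument stop at the first "(" and ")"; the value stays intact.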