diff --git a/.buildkite/auditbeat/auditbeat-pipeline.yml b/.buildkite/auditbeat/auditbeat-pipeline.yml index e083df17749..b65044e7344 100644 --- a/.buildkite/auditbeat/auditbeat-pipeline.yml +++ b/.buildkite/auditbeat/auditbeat-pipeline.yml @@ -3,7 +3,6 @@ name: "beats-auditbeat" env: AWS_ARM_INSTANCE_TYPE: "m6g.xlarge" - AWS_IMAGE_UBUNTU_ARM_64: "platform-ingest-beats-ubuntu-2204-aarch64" GCP_DEFAULT_MACHINE_TYPE: "c2d-highcpu-8" GCP_HI_PERF_MACHINE_TYPE: "c2d-highcpu-16" @@ -12,7 +11,12 @@ env: IMAGE_MACOS_ARM: "generic-13-ventura-arm" IMAGE_MACOS_X86_64: "generic-13-ventura-x64" IMAGE_RHEL9: "family/platform-ingest-beats-rhel-9" - IMAGE_UBUNTU_X86_64: "family/platform-ingest-beats-ubuntu-2204" + IMAGE_UBUNTU_2004_X86_64: "family/platform-ingest-beats-ubuntu-2004" + IMAGE_UBUNTU_2004_ARM64: "platform-ingest-beats-ubuntu-2004-aarch64" + IMAGE_UBUNTU_2204_X86_64: "family/platform-ingest-beats-ubuntu-2204" + IMAGE_UBUNTU_2204_ARM64: "platform-ingest-beats-ubuntu-2204-aarch64" + IMAGE_UBUNTU_2404_X86_64: "family/platform-ingest-beats-ubuntu-2404" + IMAGE_UBUNTU_2404_ARM64: "platform-ingest-beats-ubuntu-2404-aarch64" IMAGE_WIN_10: "family/platform-ingest-beats-windows-10" IMAGE_WIN_11: "family/platform-ingest-beats-windows-11" IMAGE_WIN_2016: "family/platform-ingest-beats-windows-2016" @@ -81,7 +85,7 @@ steps: - limit: 1 agents: provider: "gcp" - image: "${IMAGE_UBUNTU_X86_64}" + image: "${IMAGE_UBUNTU_2204_X86_64}" machineType: "${GCP_DEFAULT_MACHINE_TYPE}" artifact_paths: - "auditbeat/build/*.xml" @@ -181,7 +185,7 @@ steps: - limit: 1 agents: provider: "gcp" - image: "${IMAGE_UBUNTU_X86_64}" + image: "${IMAGE_UBUNTU_2204_X86_64}" machineType: "${GCP_HI_PERF_MACHINE_TYPE}" notify: - github_commit_status: @@ -192,7 +196,7 @@ steps: if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*(macOS|arm|integrations).*/ steps: - - label: ":ubuntu: Auditbeat: Ubuntu x86_64 Integration Tests" + - label: ":ubuntu: Auditbeat: Ubuntu x86_64 Integration Tests -- {{matrix.image}}" key: "auditbeat-extended-integ-tests" if: build.env("GITHUB_PR_LABELS") =~ /.*integrations.*/ command: | @@ -204,7 +208,7 @@ steps: - limit: 1 agents: provider: "gcp" - image: "${IMAGE_UBUNTU_X86_64}" + image: "{{matrix.image}}" machineType: "${GCP_DEFAULT_MACHINE_TYPE}" artifact_paths: - "auditbeat/build/*.xml" @@ -218,8 +222,14 @@ steps: notify: - github_commit_status: context: "auditbeat: Ubuntu x86_64 Integration Tests" + matrix: + setup: + image: + - "${IMAGE_UBUNTU_2004_X86_64}" + - "${IMAGE_UBUNTU_2204_X86_64}" + - "${IMAGE_UBUNTU_2404_X86_64}" - - label: ":ubuntu: Auditbeat: Ubuntu arm64 Integration Tests" + - label: ":ubuntu: Auditbeat: Ubuntu arm64 Integration Tests -- {{matrix.image}}" key: "auditbeat-extended-arm64-integ-tests" if: build.env("GITHUB_PR_LABELS") =~ /.*integrations.*/ command: | @@ -231,7 +241,7 @@ steps: - limit: 1 agents: provider: "aws" - imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" + imagePrefix: "{{matrix.image}}" instanceType: "${AWS_ARM_INSTANCE_TYPE}" artifact_paths: - "auditbeat/build/*.xml" @@ -245,6 +255,12 @@ steps: notify: - github_commit_status: context: "auditbeat: Ubuntu arm64 Integration Tests" + matrix: + setup: + image: + - "${IMAGE_UBUNTU_2004_ARM64}" + - "${IMAGE_UBUNTU_2204_ARM64}" + - "${IMAGE_UBUNTU_2404_ARM64}" - label: ":ubuntu: Auditbeat: Ubuntu arm64 Unit Tests" key: "auditbeat-extended-arm64-unit-tests" @@ -258,7 +274,7 @@ steps: - limit: 1 agents: provider: "aws" - imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" + imagePrefix: 
"${IMAGE_UBUNTU_2204_ARM64}" instanceType: "${AWS_ARM_INSTANCE_TYPE}" artifact_paths: - "auditbeat/build/*.xml" @@ -436,7 +452,7 @@ steps: timeout_in_minutes: 20 agents: provider: gcp - image: "${IMAGE_UBUNTU_X86_64}" + image: "${IMAGE_UBUNTU_2204_X86_64}" machineType: "${GCP_HI_PERF_MACHINE_TYPE}" notify: - github_commit_status: @@ -457,7 +473,7 @@ steps: timeout_in_minutes: 20 agents: provider: "aws" - imagePrefix: "${AWS_IMAGE_UBUNTU_ARM_64}" + imagePrefix: "${IMAGE_UBUNTU_2204_ARM64}" instanceType: "${AWS_ARM_INSTANCE_TYPE}" notify: - github_commit_status: diff --git a/.buildkite/filebeat/filebeat-pipeline.yml b/.buildkite/filebeat/filebeat-pipeline.yml index 2b58709b213..46720357074 100644 --- a/.buildkite/filebeat/filebeat-pipeline.yml +++ b/.buildkite/filebeat/filebeat-pipeline.yml @@ -25,7 +25,7 @@ env: # Integration Tests K8S_VERSION: "v1.31.0" - ASDF_KIND_VERSION: "0.20.0" + ASDF_KIND_VERSION: "0.24.0" # Unit tests RACE_DETECTOR: "true" @@ -111,6 +111,9 @@ steps: artifact_paths: - "filebeat/build/*.xml" - "filebeat/build/*.json" + - "filebeat/build/integration-tests/*" + - "filebeat/build/integration-tests/Test*/*" + - "filebeat/build/integration-tests/Test*/data/**/*" plugins: - test-collector#v1.10.2: files: "filebeat/build/TEST-*.xml" diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index 5718d97879e..b2b165b76b6 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -20,6 +20,7 @@ fi if [[ "$BUILDKITE_PIPELINE_SLUG" == "auditbeat" || \ "$BUILDKITE_PIPELINE_SLUG" == "beats-libbeat" || \ + "$BUILDKITE_PIPELINE_SLUG" == "beats-macos-tests" || \ "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" || \ "$BUILDKITE_PIPELINE_SLUG" == "beats-packetbeat" || \ "$BUILDKITE_PIPELINE_SLUG" == "beats-xpack-agentbeat" || \ diff --git a/.buildkite/macos-tests-pipeline.yml b/.buildkite/macos-tests-pipeline.yml new file mode 100644 index 00000000000..f910f4f119a --- /dev/null +++ b/.buildkite/macos-tests-pipeline.yml @@ -0,0 +1,576 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json + +env: + IMAGE_MACOS_ARM: "generic-13-ventura-arm" + IMAGE_MACOS_X86_64: "generic-13-ventura-x64" + + # Other deps + ASDF_MAGE_VERSION: 1.15.0 + ASDF_NODEJS_VERSION: 18.17.1 + ASDF_PYTHON_VERSION: 3.10.9 + + # Unit tests + RACE_DETECTOR: "true" + TEST_COVERAGE: "true" + + # Concurrency definition + CONCURRENCY_GROUP: "orka-concurrency-group" + CONCURRENCY_COUNT: 10 + CONCURRENCY_METHOD: eager + +steps: + - group: "Auditbeat macOS Tests" + steps: + - label: ":mac: Auditbeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd auditbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "auditbeat/build/*.xml" + - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: Auditbeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd auditbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: 
"${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "auditbeat/build/*.xml" + - "auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - group: "Filebeat macOS Tests" + steps: + - label: ":mac: Filebeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd filebeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "filebeat/build/*.xml" + - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: Filebeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd filebeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "filebeat/build/*.xml" + - "filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - group: "Heartbeat macOS Tests" + steps: + - label: ":mac: Heartbeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd heartbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "metricbeat/build/*.xml" + - "metricbeat/build/*.json" + + - label: ":mac: Heartbeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd heartbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "metricbeat/build/*.xml" + - "metricbeat/build/*.json" + + - group: "Metricbeat macOS Tests" + steps: + - label: ":mac: Metricbeat: macOS x64_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd metricbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: 
"${IMAGE_MACOS_X86_64}" + artifact_paths: + - "metricbeat/build/*.xml" + - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: Metricbeat: macOS arm64 Unit Tests" + skip: "Skipping due to elastic/beats#33035" + # https://github.com/elastic/beats/issues/33035 + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd metricbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "metricbeat/build/*.xml" + - "metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - group: "Packetbeat macOS Tests" + steps: + - label: ":mac: Packetbeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd packetbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "packetbeat/build/*.xml" + - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: Packetbeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd packetbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "packetbeat/build/*.xml" + - "packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - group: "x-pack/auditbeat macOS Tests" + steps: + - label: ":mac: x-pack/auditbeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/auditbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "x-pack/auditbeat/build/*.xml" + - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: x-pack/auditbeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/auditbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: 
"${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "x-pack/auditbeat/build/*.xml" + - "x-pack/auditbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/auditbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - group: "x-pack/filebeat macOS Tests" + steps: + - label: ":mac: x-pack/filebeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/filebeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "x-pack/filebeat/build/*.xml" + - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: x-pack/filebeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/filebeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "x-pack/filebeat/build/*.xml" + - "x-pack/filebeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/filebeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - group: "x-pack/heartbeat macOS Tests" + steps: + - label: ":mac: x-pack/heartbeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + withNodeJSEnv $ASDF_NODEJS_VERSION + installNodeJsDependencies + cd x-pack/heartbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "x-pack/heartbeat/build/*.xml" + - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: x-pack/heartbeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + withNodeJSEnv $ASDF_NODEJS_VERSION + installNodeJsDependencies + cd x-pack/heartbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "x-pack/heartbeat/build/*.xml" + - "x-pack/heartbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/heartbeat/build/TEST-*.xml" + format: "junit" + branches: 
"main" + debug: true + + - group: "x-pack/metricbeat macOS Tests" + steps: + - label: ":mac: x-pack/metricbeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/metricbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "x-pack/metricbeat/build/*.xml" + - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: x-pack/metricbeat: macOS arm64 Unit Tests" + skip: "Skipping due to elastic/beats#33036 & elastic/beats#40496" + # https://github.com/elastic/beats/issues/33036 https://github.com/elastic/beats/issues/40496 + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/metricbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "x-pack/metricbeat/build/*.xml" + - "x-pack/metricbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/metricbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - group: "x-pack/osquerybeat macOS Tests" + steps: + - label: ":mac: x-pack/osquerybeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/osquerybeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - "x-pack/osquerybeat/build/*.xml" + - "x-pack/osquerybeat/build/*.json" + + - label: ":mac: x-pack/osquerybeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/osquerybeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "x-pack/osquerybeat/build/*.xml" + - "x-pack/osquerybeat/build/*.json" + + - group: "x-pack/packetbeat macOS Tests" + steps: + - label: ":mac: x-pack/packetbeat: macOS x86_64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/packetbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_X86_64}" + artifact_paths: + - 
"x-pack/packetbeat/build/*.xml" + - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true + + - label: ":mac: x-pack/packetbeat: macOS arm64 Unit Tests" + command: | + set -euo pipefail + source .buildkite/scripts/install_macos_tools.sh + cd x-pack/packetbeat + mage build unitTest + concurrency_group: "${CONCURRENCY_GROUP}" + concurrency: "${CONCURRENCY_COUNT}" + concurrency_method: "${CONCURRENCY_METHOD}" + retry: + automatic: + - limit: 3 # using higher retries for now due to lack of custom vm images and vendor instability + agents: + provider: "orka" + imagePrefix: "${IMAGE_MACOS_ARM}" + artifact_paths: + - "x-pack/packetbeat/build/*.xml" + - "x-pack/packetbeat/build/*.json" + plugins: + - test-collector#v1.10.2: + files: "x-pack/packetbeat/build/TEST-*.xml" + format: "junit" + branches: "main" + debug: true diff --git a/.buildkite/metricbeat/pipeline.yml b/.buildkite/metricbeat/pipeline.yml index 1a9dab4a2f9..63c1870c158 100644 --- a/.buildkite/metricbeat/pipeline.yml +++ b/.buildkite/metricbeat/pipeline.yml @@ -22,7 +22,7 @@ env: # Integration Tests K8S_VERSION: "v1.31.0" - ASDF_KIND_VERSION: "0.20.0" + ASDF_KIND_VERSION: "0.24.0" # Module Tests BEAT_PATH: "metricbeat" diff --git a/.buildkite/packaging.pipeline.yml b/.buildkite/packaging.pipeline.yml index 67cdfa36326..07296d3bc3c 100644 --- a/.buildkite/packaging.pipeline.yml +++ b/.buildkite/packaging.pipeline.yml @@ -88,7 +88,8 @@ steps: env: PLATFORMS: "${PLATFORMS}" SNAPSHOT: true - DEV: true + # packaging with `DEV=true` may cause linker issues while crosscompiling https://github.com/elastic/beats/issues/41270 + DEV: false command: ".buildkite/scripts/packaging/package-dra.sh {{matrix}}" agents: provider: gcp @@ -122,7 +123,8 @@ steps: PLATFORMS: "${PLATFORMS_ARM}" PACKAGES: "docker" SNAPSHOT: true - DEV: true + # packaging with `DEV=true` may cause linker issues while crosscompiling https://github.com/elastic/beats/issues/41270 + DEV: false command: ".buildkite/scripts/packaging/package-dra.sh {{matrix}}" agents: provider: "aws" @@ -152,7 +154,8 @@ steps: env: PLATFORMS: "${PLATFORMS}" SNAPSHOT: true - DEV: true + # packaging with `DEV=true` may cause linker issues while crosscompiling https://github.com/elastic/beats/issues/41270 + DEV: false command: ".buildkite/scripts/packaging/package-dra.sh x-pack/agentbeat" agents: provider: gcp diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 49b9f58a4b6..8f060dc48cf 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -68,6 +68,7 @@ CHANGELOG* /libbeat/processors/dns/ @elastic/sec-deployment-and-devices /libbeat/processors/registered_domain/ @elastic/sec-deployment-and-devices /libbeat/processors/syslog/ @elastic/sec-deployment-and-devices +/libbeat/processors/translate_ldap_attribute/ @elastic/sec-windows-platform /libbeat/processors/translate_sid/ @elastic/sec-windows-platform /libbeat/reader/syslog/ @elastic/sec-deployment-and-devices /libbeat/scripts @elastic/ingest-eng-prod diff --git a/.golangci.yml b/.golangci.yml index 52c4fb79720..936215ea909 100755 --- a/.golangci.yml +++ b/.golangci.yml @@ -30,6 +30,9 @@ issues: - text: "imported and not used" linters: - typecheck + - text: "previous case" + linters: + - typecheck # From mage we are printing to the console to ourselves - path: (.*magefile.go|.*dev-tools/mage/.*) linters: forbidigo diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc index 01a7205e713..0998beb9f7d 
100644 --- a/CHANGELOG-developer.next.asciidoc +++ b/CHANGELOG-developer.next.asciidoc @@ -106,6 +106,8 @@ The list below covers the major changes between 7.0.0-rc2 and main only. - Close connections properly in Filebeat's HTTPJSON input. {pull}39790[39790] - Add the Offset property to libbeat/reader.Message to store the total number of bytes read and discarded before generating the message. This enables inputs to accurately determine how much data has been read up to the message, using Message.Bytes + Message.Offset. {pull}39873[39873] {issue}39653[39653] - AWS CloudWatch Metrics record previous endTime to use for next collection period and change log.logger from cloudwatch to aws.cloudwatch. {pull}40870[40870] +- Fix flaky test in cel and httpjson inputs of filebeat. {issue}40503[40503] {pull}41358[41358] +- Fix documentation and implementation of raw message handling in Filebeat http_endpoint by removing it. {pull}41498[41498] ==== Added diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 449bc30dd52..5c273f7bfb1 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -7,6 +7,12 @@ === Beats version 8.15.3 https://github.com/elastic/beats/compare/v8.15.2\...v8.15.3[View commits] +==== Known issues + +*Affecting all Beats* + +- Memory usage is not correctly limited by the number of events actively in the memory queue, but rather the maximum size of the memory queue regardless of usage. {issue}41355[41355] + ==== Breaking changes *Filebeat* @@ -38,6 +44,7 @@ https://github.com/elastic/beats/compare/v8.15.2\...v8.15.3[View commits] *Affecting all Beats* - Update Go version to 1.22.8. {pull}41139[41139] +- Add kafka compression support for ZSTD. *Metricbeat* @@ -54,6 +61,12 @@ https://github.com/elastic/beats/compare/v8.15.0\...v8.15.2[View commits] - Beats Docker images do not log to stderr by default. The workaround is to pass the CLI flag `-e` or to set `logging.to_stderr: true` in the configuration file. {issue}41118[41118] +==== Known issues + +*Affecting all Beats* + +- Memory usage is not correctly limited by the number of events actively in the memory queue, but rather the maximum size of the memory queue regardless of usage. {issue}41355[41355] + ==== Bugfixes *Affecting all Beats* @@ -72,7 +85,9 @@ https://github.com/elastic/beats/compare/v8.15.0\...v8.15.1[View commits] *Affecting all Beats* -- Beats Docker images do not log to stderr by default. The workaround is to pass the CLI flag `-e` or to set `logging.to_stderr: true` in the configuration file. {issue}41118[41118] +- Beats Docker images do not log to stderr by default. The workaround is to pass the CLI flag `-e` or to set `logging.to_stderr: true` in the configuration file. +- Beats stop publishing data after a network error unless restarted. Avoid upgrading to 8.15.1. Affected Beats log `Get \"https://${ELASTICSEARCH_HOST}:443\": context canceled` repeatedly. {issue}40705[40705] +- Memory usage is not correctly limited by the number of events actively in the memory queue, but rather the maximum size of the memory queue regardless of usage. {issue}41355[41355] ==== Bugfixes @@ -129,6 +144,7 @@ https://github.com/elastic/beats/compare/v8.14.3\...v8.15.0[View commits] *Filebeat* - The Azure EventHub input in Filebeat is not found when running on Windows. Please refrain from upgrading to 8.15. See {issue}40608[40608] for details. +- Memory usage is not correctly limited by the number of events actively in the memory queue, but rather the maximum size of the memory queue regardless of usage. 
{issue}41355[41355] ==== Breaking changes @@ -464,7 +480,7 @@ https://github.com/elastic/beats/compare/v8.13.2\...v8.13.3[View commits] *Affecting all Beats* -- Update Go version to 1.21.9. {pulk}38727[38727] +- Update Go version to 1.21.9. {pull}38727[38727] - The environment variable `BEATS_ADD_CLOUD_METADATA_PROVIDERS` overrides configured/default `add_cloud_metadata` providers. {pull}38669[38669] *Auditbeat* diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index bac0ca314e8..05345fb5ec0 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -14,6 +14,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Beats won't log startup information when running under the Elastic Agent {pull}40390[40390] - Filebeat now needs `dup3`, `faccessat2`, `prctl` and `setrlimit` syscalls to run the journald input. If this input is not being used, the syscalls are not needed. All Beats have those syscalls allowed now because the default seccomp policy is global to all Beats. {pull}40061[40061] - Beats will rate limit the logs about errors when indexing events on Elasticsearch, logging a summary every 10s. The logs sent to the event log are unchanged. {issue}40157[40157] +- Drop support for Debian 10 and upgrade statically linked glibc from 2.28 to 2.31 {pull}41402[41402] *Auditbeat* @@ -47,8 +48,8 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Change log.file.path field in awscloudwatch input to nested object. {pull}41099[41099] - Remove deprecated awscloudwatch field from Filebeat. {pull}41089[41089] - The performance of ingesting SQS data with the S3 input has improved by up to 60x for queues with many small events. `max_number_of_messages` config for SQS mode is now ignored, as the new design no longer needs a manual cap on messages. Instead, use `number_of_workers` to scale ingestion rate in both S3 and SQS modes. The increased efficiency may increase network bandwidth consumption, which can be throttled by lowering `number_of_workers`. It may also increase number of events stored in memory, which can be throttled by lowering the configured size of the internal queue. {pull}40699[40699] -- System module events now contain `input.type: systemlogs` instead of `input.type: log` when harvesting log files. {pull}41061[41061] +- Add kafka compression support for ZSTD. *Heartbeat* @@ -81,6 +82,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Winlogbeat* - Add "event.category" and "event.type" to Sysmon module for EventIDs 8, 9, 19, 20, 27, 28, 255 {pull}35193[35193] +- Fix truncated Windows event log message {pull}41327[41327] *Functionbeat* @@ -106,6 +108,9 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Support Elastic Agent control protocol chunking {pull}37343[37343] - Lower logging level to debug when attempting to configure beats with unknown fields from autodiscovered events/environments {pull}37816[37816] - Set timeout of 1 minute for FQDN requests {pull}37756[37756] +- Fix issue where old data could be saved in the memory queue after acknowledgment, increasing memory use {pull}41356[41356] +- Ensure Elasticsearch output can always recover from network errors {pull}40794[40794] +- Add `translate_ldap_attribute` processor. {pull}41472[41472] *Auditbeat* @@ -164,6 +169,10 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fixed failed job handling and removed false-positive error logs in the GCS input. 
{pull}41142[41142] - Bump github.com/elastic/go-sfdc dependency used by x-pack/filebeat/input/salesforce. {pull}41192[41192] - Log bad handshake details when websocket connection fails {pull}41300[41300] +- Improve modification time handling for entities and entity deletion logic in the Active Directory entityanalytics input. {pull}41179[41179] +- Journald input can now read events from all boots {issue}41083[41083] {pull}41244[41244] +- Fix double encoding of client_secret in the Entity Analytics input's Azure Active Directory provider {pull}41393[41393] +- Fix errors in SQS host resolution in the `aws-s3` input when using custom (non-AWS) endpoints. {pull}41504[41504] *Heartbeat* @@ -196,6 +205,8 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Use namespace for GetListMetrics when exists in AWS {pull}41022[41022] - Fix http server helper SSL config. {pull}39405[39405] - Fix Kubernetes metadata sometimes not being present after startup {pull}41216[41216] +- Do not report non-existent 0 values for RSS metrics in docker/memory {pull}41449[41449] + *Osquerybeat* @@ -227,6 +238,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Update to Go 1.22.7. {pull}41018[41018] - Replace Ubuntu 20.04 with 24.04 for Docker base images {issue}40743[40743] {pull}40942[40942] - Reduce memory consumption of k8s autodiscovery and the add_kubernetes_metadata processor when Deployment metadata is enabled +- Add `lowercase` processor. {issue}22254[22254] {pull}41424[41424] *Auditbeat* @@ -237,7 +249,6 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Auditbeat* - *Auditbeat* @@ -316,9 +327,10 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Add CSV decoding capacity to gcs input {pull}40979[40979] - Add support to source AWS cloudwatch logs from linked accounts. {pull}41188[41188] - Journald input now supports filtering by facilities {pull}41061[41061] -- System module now supports reading from jounrald. {pull}41061[41061] - Add support to include AWS cloudwatch linked accounts when using log_group_name_prefix to define log group names. {pull}41206[41206] - Improved Azure Blob Storage input documentation. {pull}41252[41252] +- Make ETW input GA. {pull}41389[41389] +- Add support for Okta entity analytics provider to collect role and factor data for users. {pull}41460[41460] *Auditbeat* @@ -353,6 +365,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Add new metricset cluster for the vSphere module. {pull}40536[40536] - Add new metricset network for the vSphere module. {pull}40559[40559] - Add new metricset resourcepool for the vSphere module. {pull}40456[40456] +- Add AWS Cloudwatch capability to retrieve tags from AWS/ApiGateway resources {pull}40755[40755] - Add new metricset datastorecluster for vSphere module. {pull}40634[40634] - Add support for new metrics in datastorecluster metricset. {pull}40694[40694] - Add new metrics for the vSphere Virtualmachine metricset. {pull}40485[40485] @@ -379,6 +392,8 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Winlogbeat* +- Add handling for missing `EvtVarType`s in experimental API. 
{issue}19337[19337] {pull}41418[41418] + *Functionbeat* diff --git a/NOTICE.txt b/NOTICE.txt index b5df79133f7..f33fb7667c4 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -5063,11 +5063,11 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-lambda-go@v1.44 -------------------------------------------------------------------------------- Dependency : github.com/aws/aws-sdk-go-v2 -Version: v1.30.4 +Version: v1.30.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2@v1.30.4/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2@v1.30.5/LICENSE.txt: Apache License @@ -6122,12 +6122,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/featu -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/cloudformation -Version: v1.53.5 +Dependency : github.com/aws/aws-sdk-go-v2/service/apigateway +Version: v1.25.8 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/cloudformation@v1.53.5/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/apigateway@v1.25.8/LICENSE.txt: Apache License @@ -6334,12 +6334,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/cloudwatch -Version: v1.40.5 +Dependency : github.com/aws/aws-sdk-go-v2/service/apigatewayv2 +Version: v1.22.8 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/cloudwatch@v1.40.5/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/apigatewayv2@v1.22.8/LICENSE.txt: Apache License @@ -6546,12 +6546,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs -Version: v1.37.5 +Dependency : github.com/aws/aws-sdk-go-v2/service/cloudformation +Version: v1.53.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs@v1.37.5/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/cloudformation@v1.53.5/LICENSE.txt: Apache License @@ -6758,12 +6758,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/costexplorer -Version: v1.40.4 +Dependency : github.com/aws/aws-sdk-go-v2/service/cloudwatch +Version: v1.40.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/costexplorer@v1.40.4/LICENSE.txt: +Contents of probable licence file 
$GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/cloudwatch@v1.40.5/LICENSE.txt: Apache License @@ -6970,12 +6970,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/ec2 -Version: v1.176.0 +Dependency : github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs +Version: v1.37.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/ec2@v1.176.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs@v1.37.5/LICENSE.txt: Apache License @@ -7182,12 +7182,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 -Version: v1.34.2 +Dependency : github.com/aws/aws-sdk-go-v2/service/costexplorer +Version: v1.40.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2@v1.34.2/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/costexplorer@v1.40.4/LICENSE.txt: Apache License @@ -7394,12 +7394,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/health -Version: v1.26.4 +Dependency : github.com/aws/aws-sdk-go-v2/service/ec2 +Version: v1.176.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/health@v1.26.4/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/ec2@v1.176.0/LICENSE.txt: Apache License @@ -7606,12 +7606,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/iam -Version: v1.35.0 +Dependency : github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 +Version: v1.34.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/iam@v1.35.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2@v1.34.2/LICENSE.txt: Apache License @@ -7818,12 +7818,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/kinesis -Version: v1.29.5 +Dependency : github.com/aws/aws-sdk-go-v2/service/health +Version: v1.26.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/kinesis@v1.29.5/LICENSE.txt: +Contents of 
probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/health@v1.26.4/LICENSE.txt: Apache License @@ -8030,12 +8030,12 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/organizations -Version: v1.30.3 +Dependency : github.com/aws/aws-sdk-go-v2/service/iam +Version: v1.35.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/organizations@v1.30.3/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/iam@v1.35.0/LICENSE.txt: Apache License @@ -8242,12 +8242,436 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/servi -------------------------------------------------------------------------------- -Dependency : github.com/aws/aws-sdk-go-v2/service/rds -Version: v1.82.2 +Dependency : github.com/aws/aws-sdk-go-v2/service/kinesis +Version: v1.29.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/rds@v1.82.2/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/kinesis@v1.29.5/LICENSE.txt: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/aws/aws-sdk-go-v2/service/organizations +Version: v1.30.3 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/organizations@v1.30.3/LICENSE.txt: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/aws/aws-sdk-go-v2/service/rds +Version: v1.82.2 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/service/rds@v1.82.2/LICENSE.txt: Apache License @@ -13019,11 +13443,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.12.1 +Version: v0.17.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.12.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.17.1/LICENSE: Apache License Version 2.0, January 2004 @@ -14747,23 +15171,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-quark -Version: v0.1.2 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-quark@v0.1.2/LICENSE.txt: - -Source code in this repository is licensed under the Apache License Version 2.0, -an Apache compatible license. - - --------------------------------------------------------------------------------- -Dependency : github.com/elastic/go-seccomp-bpf -Version: v1.4.0 +Version: v0.2.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-seccomp-bpf@v1.4.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-quark@v0.2.0/LICENSE.txt: Apache License @@ -14970,43 +15382,255 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-seccomp-bpf@ -------------------------------------------------------------------------------- -Dependency : github.com/elastic/go-sfdc -Version: v0.0.0-20241010131323-8e176480d727 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-sfdc@v0.0.0-20241010131323-8e176480d727/LICENSE.txt: - -MIT License - -Copyright (c) 2019 Robert Sean Justice - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - --------------------------------------------------------------------------------- -Dependency : github.com/elastic/go-structform -Version: v0.0.10 +Dependency : github.com/elastic/go-seccomp-bpf +Version: v1.4.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-structform@v0.0.10/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-seccomp-bpf@v1.4.0/LICENSE.txt: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-sfdc +Version: v0.0.0-20241010131323-8e176480d727 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-sfdc@v0.0.0-20241010131323-8e176480d727/LICENSE.txt: + +MIT License + +Copyright (c) 2019 Robert Sean Justice + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-structform +Version: v0.0.10 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-structform@v0.0.10/LICENSE: Apache License Version 2.0, January 2004 @@ -16057,6 +16681,29 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/mito@v1.15.0/LI limitations under the License. 
+-------------------------------------------------------------------------------- +Dependency : github.com/elastic/mock-es +Version: v0.0.0-20240712014503-e5b47ece0015 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/mock-es@v0.0.0-20240712014503-e5b47ece0015/LICENSE: + +Copyright 2024 Elasticsearch B.V. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + -------------------------------------------------------------------------------- Dependency : github.com/elastic/tk-btf Version: v0.1.0 @@ -25581,6 +26228,43 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- +Dependency : golang.org/x/term +Version: v0.24.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.24.0/LICENSE: + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + -------------------------------------------------------------------------------- Dependency : golang.org/x/text Version: v0.18.0 @@ -34733,11 +35417,11 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/aws/p -------------------------------------------------------------------------------- Dependency : github.com/aws/aws-sdk-go-v2/internal/configsources -Version: v1.3.16 +Version: v1.3.17 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/configsources@v1.3.16/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/configsources@v1.3.17/LICENSE.txt: Apache License @@ -34945,11 +35629,11 @@ Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/inter -------------------------------------------------------------------------------- Dependency : github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -Version: v2.6.16 +Version: v2.6.17 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2@v2.6.16/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2@v2.6.17/LICENSE.txt: Apache License @@ -48374,6 +49058,36 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/mileusna/useragent +Version: v1.3.4 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/mileusna/useragent@v1.3.4/LICENSE.md: + +MIT License + +Copyright (c) 2017 Miloš Mileusnić + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + -------------------------------------------------------------------------------- Dependency : github.com/minio/asm2plan9s Version: v0.0.0-20200509001527-cdd76441f9d8 @@ -55026,6 +55740,218 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pdat limitations under the License. 
+-------------------------------------------------------------------------------- +Dependency : go.opentelemetry.io/collector/pdata/testdata +Version: v0.109.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/collector/pdata/testdata@v0.109.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + -------------------------------------------------------------------------------- Dependency : go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc Version: v0.49.0 @@ -57052,43 +57978,6 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- -Dependency : golang.org/x/term -Version: v0.24.0 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.24.0/LICENSE: - -Copyright 2009 The Go Authors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google LLC nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -------------------------------------------------------------------------------- Dependency : golang.org/x/xerrors Version: v0.0.0-20231012003039-104605ab7028 diff --git a/auditbeat/main_test.go b/auditbeat/main_test.go index f91bc1f9482..8b16fe63a58 100644 --- a/auditbeat/main_test.go +++ b/auditbeat/main_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/elastic/beats/v7/auditbeat/cmd" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" ) @@ -34,11 +35,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. func TestSystem(*testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/catalog-info.yaml b/catalog-info.yaml index 61a12a15cd6..a0eca0c2c9f 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -1149,6 +1149,49 @@ spec: everyone: access_level: BUILD_AND_READ +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: beats-macos-tests + description: 'Runs of Beats macOS tests' + links: + - title: Pipeline + url: https://buildkite.com/elastic/beats-macos-tests +spec: + type: buildkite-pipeline + owner: group:ingest-fp + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: beats-macos-tests + description: 'Runs of Beats macOS tests' + spec: + repository: elastic/beats + pipeline_file: ".buildkite/macos-tests-pipeline.yml" + cancel_intermediate_builds: false + provider_settings: + trigger_mode: none + env: + ELASTIC_SLACK_NOTIFICATIONS_ENABLED: 'true' + SLACK_NOTIFICATIONS_CHANNEL: '#ingest-notifications' + SLACK_NOTIFICATIONS_ON_SUCCESS: 'false' + schedules: + Weekly main: + branch: main + cronline: 0 0 * * 0 + message: Weekly build of macOS tests + teams: + ingest-fp: + access_level: MANAGE_BUILD_AND_READ + release-eng: + access_level: BUILD_AND_READ + everyone: + access_level: BUILD_AND_READ + --- # yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/rre.schema.json apiVersion: backstage.io/v1alpha1 @@ -1200,3 +1243,4 @@ spec: access_level: BUILD_AND_READ everyone: access_level: BUILD_AND_READ + diff --git a/dev-tools/mage/crossbuild.go b/dev-tools/mage/crossbuild.go index 972531c25a8..ede35e08d8a 100644 --- a/dev-tools/mage/crossbuild.go +++ b/dev-tools/mage/crossbuild.go @@ -221,11 +221,11 @@ func CrossBuildImage(platform string) (string, error) { switch { case platform == "darwin/amd64": - tagSuffix = "darwin-debian10" + tagSuffix = "darwin-debian11" case platform == "darwin/arm64": - tagSuffix = "darwin-arm64-debian10" + tagSuffix = "darwin-arm64-debian11" case platform == "darwin/universal": - tagSuffix = "darwin-arm64-debian10" + tagSuffix = "darwin-arm64-debian11" case platform == "linux/arm64": tagSuffix = "arm" case platform == "linux/armv5": @@ -235,13 +235,13 @@ func CrossBuildImage(platform string) (string, error) { case platform == "linux/armv7": tagSuffix = "armhf" case 
strings.HasPrefix(platform, "linux/mips"): - tagSuffix = "mips-debian10" + tagSuffix = "mips-debian11" case strings.HasPrefix(platform, "linux/ppc"): - tagSuffix = "ppc-debian10" + tagSuffix = "ppc-debian11" case platform == "linux/s390x": - tagSuffix = "s390x-debian10" + tagSuffix = "s390x-debian11" case strings.HasPrefix(platform, "linux"): - tagSuffix = "main-debian10" + tagSuffix = "main-debian11" } goVersion, err := GoVersion() diff --git a/dev-tools/notice/overrides.json b/dev-tools/notice/overrides.json index a50cac02e0f..bb82c97ebe4 100644 --- a/dev-tools/notice/overrides.json +++ b/dev-tools/notice/overrides.json @@ -19,4 +19,3 @@ {"name": "github.com/JohnCGriffin/overflow", "licenceType": "MIT"} {"name": "github.com/elastic/ebpfevents", "licenceType": "Apache-2.0"} {"name": "go.opentelemetry.io/collector/config/configopaque", "licenceType": "Apache-2.0"} -{"name": "github.com/elastic/go-quark", "licenceType": "Apache-2.0"} diff --git a/docs/devguide/contributing.asciidoc b/docs/devguide/contributing.asciidoc index fb9c23dab73..0637052b96c 100644 --- a/docs/devguide/contributing.asciidoc +++ b/docs/devguide/contributing.asciidoc @@ -102,7 +102,9 @@ recommend that you install it. === Update scripts The Beats use a variety of scripts based on Python, make and mage to generate configuration files -and documentation. The primary command used for this is: +and documentation. Ensure you use the version of Python listed in the https://github.com/elastic/beats/blob/main/.python-version[.python-version] file. + +The primary command for updating generated files is: [source,shell] -------------------------------------------------------------------------------- diff --git a/filebeat/beater/filebeat.go b/filebeat/beater/filebeat.go index 9d9cb220d4e..815b6fabfde 100644 --- a/filebeat/beater/filebeat.go +++ b/filebeat/beater/filebeat.go @@ -18,6 +18,7 @@ package beater import ( + "context" "flag" "fmt" "path/filepath" @@ -195,14 +196,16 @@ func (fb *Filebeat) setupPipelineLoaderCallback(b *beat.Beat) error { overwritePipelines := true b.OverwritePipelinesCallback = func(esConfig *conf.C) error { - esClient, err := eslegclient.NewConnectedClient(esConfig, "Filebeat") + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + esClient, err := eslegclient.NewConnectedClient(ctx, esConfig, "Filebeat") if err != nil { return err } // When running the subcommand setup, configuration from modules.d directories // have to be loaded using cfg.Reloader. Otherwise those configurations are skipped.
- pipelineLoaderFactory := newPipelineLoaderFactory(b.Config.Output.Config()) + pipelineLoaderFactory := newPipelineLoaderFactory(ctx, b.Config.Output.Config()) enableAllFilesets, _ := b.BeatConfig.Bool("config.modules.enable_all_filesets", -1) forceEnableModuleFilesets, _ := b.BeatConfig.Bool("config.modules.force_enable_module_filesets", -1) filesetOverrides := fileset.FilesetOverrides{ @@ -322,14 +325,6 @@ func (fb *Filebeat) Run(b *beat.Beat) error { outDone := make(chan struct{}) // outDone closes down all active pipeline connections pipelineConnector := channel.NewOutletFactory(outDone).Create - // Create a ES connection factory for dynamic modules pipeline loading - var pipelineLoaderFactory fileset.PipelineLoaderFactory - if b.Config.Output.Name() == "elasticsearch" { - pipelineLoaderFactory = newPipelineLoaderFactory(b.Config.Output.Config()) - } else { - logp.Warn(pipelinesWarning) - } - inputsLogger := logp.NewLogger("input") v2Inputs := fb.pluginFactory(b.Info, inputsLogger, stateStore) v2InputLoader, err := v2.NewLoader(inputsLogger, v2Inputs, "type", cfg.DefaultType) @@ -350,8 +345,22 @@ func (fb *Filebeat) Run(b *beat.Beat) error { compat.RunnerFactory(inputsLogger, b.Info, v2InputLoader), input.NewRunnerFactory(pipelineConnector, registrar, fb.done), )) - moduleLoader := fileset.NewFactory(inputLoader, b.Info, pipelineLoaderFactory, config.OverwritePipelines) + // Create an ES connection factory for dynamic modules pipeline loading + var pipelineLoaderFactory fileset.PipelineLoaderFactory + // The pipelineFactory needs a context to control the connections to ES; + // when the pipelineFactory/ESClient are no longer needed, the context + // must be cancelled. This pipeline factory will be used by the moduleLoader, + // which is run by a crawler; whenever the crawler is stopped, we also cancel + // the context.
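The lifecycle that comment describes, as a minimal standalone Go sketch: a factory closure captures a cancellable context, every client it creates inherits that context, and one cancel call on shutdown releases the connections. Here `esClient` and `newESClient` are hypothetical stand-ins for the eslegclient types, not the Beats implementation itself:

package main

import (
	"context"
	"fmt"
)

// esClient is a hypothetical stand-in for the eslegclient connection type;
// its network operations are assumed to stop once ctx is cancelled.
type esClient struct{ ctx context.Context }

// newESClient stands in for eslegclient.NewConnectedClient, binding the
// client it returns to the caller's context.
func newESClient(ctx context.Context) (*esClient, error) {
	return &esClient{ctx: ctx}, nil
}

func main() {
	// The factory closure captures a cancellable context, so every client
	// it produces shares one cancellation signal.
	factoryCtx, cancel := context.WithCancel(context.Background())
	factory := func() (*esClient, error) { return newESClient(factoryCtx) }

	client, err := factory()
	if err != nil {
		panic(err)
	}
	fmt.Println(client.ctx.Err()) // <nil>: the client is still usable

	// When the component that owns the factory stops (the crawler in
	// Filebeat's case), one cancel call tears down every client it made.
	cancel()
	fmt.Println(client.ctx.Err()) // context canceled
}

Cancelling `factoryCtx` here plays the role that `cancelPipelineFactoryCtx()` plays in the crawler shutdown paths below.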
+ pipelineFactoryCtx, cancelPipelineFactoryCtx := context.WithCancel(context.Background()) + defer cancelPipelineFactoryCtx() + if b.Config.Output.Name() == "elasticsearch" { + pipelineLoaderFactory = newPipelineLoaderFactory(pipelineFactoryCtx, b.Config.Output.Config()) + } else { + logp.Warn(pipelinesWarning) + } + moduleLoader := fileset.NewFactory(inputLoader, b.Info, pipelineLoaderFactory, config.OverwritePipelines) crawler, err := newCrawler(inputLoader, moduleLoader, config.Inputs, fb.done, *once) if err != nil { logp.Err("Could not init crawler: %v", err) @@ -389,6 +398,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error { err = crawler.Start(fb.pipeline, config.ConfigInput, config.ConfigModules) if err != nil { crawler.Stop() + cancelPipelineFactoryCtx() return fmt.Errorf("Failed to start crawler: %w", err) } @@ -444,6 +454,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error { modules.Stop() adiscover.Stop() crawler.Stop() + cancelPipelineFactoryCtx() timeout := fb.config.ShutdownTimeout // Checks if on shutdown it should wait for all events to be published @@ -487,9 +498,9 @@ func (fb *Filebeat) Stop() { } // Create a new pipeline loader (es client) factory -func newPipelineLoaderFactory(esConfig *conf.C) fileset.PipelineLoaderFactory { +func newPipelineLoaderFactory(ctx context.Context, esConfig *conf.C) fileset.PipelineLoaderFactory { pipelineLoaderFactory := func() (fileset.PipelineLoader, error) { - esClient, err := eslegclient.NewConnectedClient(esConfig, "Filebeat") + esClient, err := eslegclient.NewConnectedClient(ctx, esConfig, "Filebeat") if err != nil { return nil, fmt.Errorf("Error creating Elasticsearch client: %w", err) } diff --git a/filebeat/cmd/generate.go b/filebeat/cmd/generate.go index 582038716a1..1e5a4b1a819 100644 --- a/filebeat/cmd/generate.go +++ b/filebeat/cmd/generate.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/beats/v7/filebeat/generator/fields" "github.com/elastic/beats/v7/filebeat/generator/fileset" "github.com/elastic/beats/v7/filebeat/generator/module" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/common/cli" "github.com/elastic/elastic-agent-libs/paths" ) @@ -63,7 +64,9 @@ func genGenerateModuleCmd() *cobra.Command { } genModuleCmd.Flags().String("modules-path", defaultHomePath, "Path to modules directory") + cfgfile.AddAllowedBackwardsCompatibleFlag("modules-path") genModuleCmd.Flags().String("es-beats", defaultHomePath, "Path to Elastic Beats") + cfgfile.AddAllowedBackwardsCompatibleFlag("es-beats") return genModuleCmd } @@ -88,7 +91,9 @@ func genGenerateFilesetCmd() *cobra.Command { } genFilesetCmd.Flags().String("modules-path", defaultHomePath, "Path to modules directory") + cfgfile.AddAllowedBackwardsCompatibleFlag("modules-path") genFilesetCmd.Flags().String("es-beats", defaultHomePath, "Path to Elastic Beats") + cfgfile.AddAllowedBackwardsCompatibleFlag("es-beats") return genFilesetCmd } @@ -113,7 +118,9 @@ func genGenerateFieldsCmd() *cobra.Command { } genFieldsCmd.Flags().String("es-beats", defaultHomePath, "Path to Elastic Beats") + cfgfile.AddAllowedBackwardsCompatibleFlag("es-beats") genFieldsCmd.Flags().Bool("without-documentation", false, "Do not add description fields") + cfgfile.AddAllowedBackwardsCompatibleFlag("without-documentation") return genFieldsCmd } diff --git a/filebeat/cmd/root.go b/filebeat/cmd/root.go index 2420a03efdb..48e9c9d74bc 100644 --- a/filebeat/cmd/root.go +++ b/filebeat/cmd/root.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/beats/v7/filebeat/fileset" 
"github.com/elastic/beats/v7/filebeat/include" "github.com/elastic/beats/v7/filebeat/input" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd" "github.com/elastic/beats/v7/libbeat/cmd/instance" @@ -49,7 +50,9 @@ func FilebeatSettings(moduleNameSpace string) instance.Settings { } runFlags := pflag.NewFlagSet(Name, pflag.ExitOnError) runFlags.AddGoFlag(flag.CommandLine.Lookup("once")) + cfgfile.AddAllowedBackwardsCompatibleFlag("once") runFlags.AddGoFlag(flag.CommandLine.Lookup("modules")) + cfgfile.AddAllowedBackwardsCompatibleFlag("modules") return instance.Settings{ RunFlags: runFlags, Name: Name, @@ -66,8 +69,10 @@ func FilebeatSettings(moduleNameSpace string) instance.Settings { func Filebeat(inputs beater.PluginFactory, settings instance.Settings) *cmd.BeatsRootCmd { command := cmd.GenRootCmdWithSettings(beater.New(inputs), settings) command.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("M")) + cfgfile.AddAllowedBackwardsCompatibleFlag("M") command.TestCmd.Flags().AddGoFlag(flag.CommandLine.Lookup("modules")) command.SetupCmd.Flags().AddGoFlag(flag.CommandLine.Lookup("modules")) + cfgfile.AddAllowedBackwardsCompatibleFlag("modules") command.AddCommand(cmd.GenModulesCmd(Name, "", buildModulesManager)) command.AddCommand(genGenerateCmd()) return command diff --git a/filebeat/docs/include/use-journald.asciidoc b/filebeat/docs/include/use-journald.asciidoc deleted file mode 100644 index 12cb33c0c6c..00000000000 --- a/filebeat/docs/include/use-journald.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -*`var.use_journald`*:: - -A boolean that when set to `true` will read logs from Journald. When -Journald is used all events contain the tag `journald` - -*`var.use_files`*:: - -A boolean that when set to `true` will read logs from the log files -defined by `vars.paths`. - -If neither `var.use_journald` nor `var.use_files` are set (or both are -`false`) {beatname_uc} will auto-detect the source for the logs. diff --git a/filebeat/docs/inputs/input-kafka.asciidoc b/filebeat/docs/inputs/input-kafka.asciidoc index 96836a63a38..ee0cd7842a4 100644 --- a/filebeat/docs/inputs/input-kafka.asciidoc +++ b/filebeat/docs/inputs/input-kafka.asciidoc @@ -60,6 +60,9 @@ might work as well, but are not supported. The `kafka` input supports the following configuration options plus the <<{beatname_lc}-input-{type}-common-options>> described later. +NOTE: If you're using {agent} with a Kafka input and need to increase throughput, we recommend scaling horizontally by additional {agents} to read from the Kafka topic. +Note that each {agent} reads concurrently from each of the partitions it has been assigned. + [float] [[kafka-hosts]] ===== `hosts` diff --git a/filebeat/docs/modules/system.asciidoc b/filebeat/docs/modules/system.asciidoc index 88cb1f78a1c..1866f2d5c25 100644 --- a/filebeat/docs/modules/system.asciidoc +++ b/filebeat/docs/modules/system.asciidoc @@ -23,7 +23,7 @@ include::../include/gs-link.asciidoc[] === Compatibility This module was tested with logs from OSes like Ubuntu 12.04, Centos 7, and -macOS Sierra. For Debian 12 Journald is used to read the system logs. +macOS Sierra. This module is not available for Windows. @@ -65,15 +65,11 @@ include::../include/config-option-intro.asciidoc[] include::../include/var-paths.asciidoc[] -include::../include/use-journald.asciidoc[] - [float] ==== `auth` fileset settings include::../include/var-paths.asciidoc[] -include::../include/use-journald.asciidoc[] - *`var.tags`*:: A list of tags to include in events. 
Including `forwarded` indicates that the diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index a1af7b861d5..14e9f276fb4 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -21,18 +21,7 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Force using journald to collect system logs - #var.use_journald: true|false - - # Force using log files to collect system logs - #var.use_files: true|false - - # If use_journald and use_files are false, then - # Filebeat will autodetect whether use to journald - # to collect system logs. - - # Input configuration (advanced). - # Any input configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. #input: @@ -44,23 +33,6 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Force using journald to collect system logs - #var.use_journald: true|false - - # Force using log files to collect system logs - #var.use_files: true|false - - # If use_journald and use_files are false, then - # Filebeat will autodetect whether use to journald - # to collect system logs. - - # A list of tags to include in events. Including 'forwarded' - # indicates that the events did not originate on this host and - # causes host.name to not be added to events. Include - # 'preserve_orginal_event' causes the pipeline to retain the raw log - # in event.original. Defaults to []. - #var.tags: [] - # Input configuration (advanced). Any input configuration option # can be added under this section. #input: diff --git a/filebeat/fileset/modules_integration_test.go b/filebeat/fileset/modules_integration_test.go index 0d5ad2172c0..ffb149e53b3 100644 --- a/filebeat/fileset/modules_integration_test.go +++ b/filebeat/fileset/modules_integration_test.go @@ -20,6 +20,7 @@ package fileset import ( + "context" "encoding/json" "path/filepath" "testing" @@ -268,7 +269,9 @@ func getTestingElasticsearch(t eslegtest.TestLogger) *eslegclient.Connection { conn.Encoder = eslegclient.NewJSONEncoder(nil, false) - err = conn.Connect() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + err = conn.Connect(ctx) if err != nil { t.Fatal(err) panic(err) // panic in case TestLogger did not stop test diff --git a/filebeat/fileset/pipelines_test.go b/filebeat/fileset/pipelines_test.go index a358b0da9be..ac6aa5035de 100644 --- a/filebeat/fileset/pipelines_test.go +++ b/filebeat/fileset/pipelines_test.go @@ -20,6 +20,7 @@ package fileset import ( + "context" "net/http" "net/http/httptest" "testing" @@ -101,7 +102,9 @@ func TestLoadPipelinesWithMultiPipelineFileset(t *testing.T) { }) require.NoError(t, err) - err = testESClient.Connect() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + err = testESClient.Connect(ctx) require.NoError(t, err) err = testRegistry.LoadPipelines(testESClient, false) diff --git a/filebeat/input/journald/config.go b/filebeat/input/journald/config.go index d354baaacf5..56f9388f503 100644 --- a/filebeat/input/journald/config.go +++ b/filebeat/input/journald/config.go @@ -38,6 +38,9 @@ var includeMatchesWarnOnce sync.Once // Config stores the options of a journald input. type config struct { + // ID is the input ID, each instance must have a unique ID + ID string `config:"id"` + // Paths stores the paths to the journal files to be read. 
Paths []string `config:"paths"` diff --git a/filebeat/input/journald/environment_test.go b/filebeat/input/journald/environment_test.go index 209a2e2dfd8..57f75163e92 100644 --- a/filebeat/input/journald/environment_test.go +++ b/filebeat/input/journald/environment_test.go @@ -27,7 +27,7 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" v2 "github.com/elastic/beats/v7/filebeat/input/v2" "github.com/elastic/beats/v7/libbeat/beat" @@ -107,7 +107,7 @@ func (e *inputTestingEnvironment) waitUntilEventCount(count int) { e.t.Helper() msg := strings.Builder{} fmt.Fprintf(&msg, "did not find the expected %d events", count) - assert.Eventually(e.t, func() bool { + require.Eventually(e.t, func() bool { sum := len(e.pipeline.GetAllEvents()) if sum == count { return true diff --git a/filebeat/input/journald/input.go b/filebeat/input/journald/input.go index 20e46bd0cc2..0ab3c548177 100644 --- a/filebeat/input/journald/input.go +++ b/filebeat/input/journald/input.go @@ -42,6 +42,7 @@ type journalReader interface { } type journald struct { + ID string Backoff time.Duration MaxBackoff time.Duration Since time.Duration @@ -108,6 +109,7 @@ func Configure(cfg *conf.C) ([]cursor.Source, cursor.Input, error) { } return sources, &journald{ + ID: config.ID, Since: config.Since, Seek: config.Seek, Matches: journalfield.IncludeMatches(config.Matches), @@ -124,7 +126,7 @@ func (inp *journald) Name() string { return pluginName } func (inp *journald) Test(src cursor.Source, ctx input.TestContext) error { reader, err := journalctl.New( - ctx.Logger, + ctx.Logger.With("input_id", inp.ID), ctx.Cancelation, inp.Units, inp.Identifiers, @@ -149,7 +151,9 @@ func (inp *journald) Run( cursor cursor.Cursor, publisher cursor.Publisher, ) error { - logger := ctx.Logger.With("path", src.Name()) + logger := ctx.Logger. + With("path", src.Name()). 
+ With("input_id", inp.ID) currentCheckpoint := initCheckpoint(logger, cursor) mode := inp.Seek diff --git a/filebeat/input/journald/input_filtering_test.go b/filebeat/input/journald/input_filtering_test.go index c9ddec9c046..1aa58d1f8bc 100644 --- a/filebeat/input/journald/input_filtering_test.go +++ b/filebeat/input/journald/input_filtering_test.go @@ -274,9 +274,11 @@ func TestInputSeek(t *testing.T) { env.waitUntilEventCount(len(testCase.expectedMessages)) - for idx, event := range env.pipeline.GetAllEvents() { - if got, expected := event.Fields["message"], testCase.expectedMessages[idx]; got != expected { - t.Fatalf("expecting event message %q, got %q", expected, got) + if !t.Failed() { + for idx, event := range env.pipeline.GetAllEvents() { + if got, expected := event.Fields["message"], testCase.expectedMessages[idx]; got != expected { + t.Fatalf("expecting event message %q, got %q", expected, got) + } } } }) diff --git a/filebeat/input/journald/input_parsers_test.go b/filebeat/input/journald/input_parsers_test.go index 720f53b8ce8..c1c2c6f6bb5 100644 --- a/filebeat/input/journald/input_parsers_test.go +++ b/filebeat/input/journald/input_parsers_test.go @@ -31,31 +31,41 @@ import ( // it only tests a single parser, but that is enough to ensure // we're correctly using the parsers func TestInputParsers(t *testing.T) { - inputParsersExpected := []string{"1st line\n2nd line\n3rd line", "4th line\n5th line\n6th line"} env := newInputTestingEnvironment(t) - inp := env.mustCreateInput(mapstr.M{ - "paths": []string{path.Join("testdata", "input-multiline-parser.journal")}, - "include_matches.match": []string{"_SYSTEMD_USER_UNIT=log-service.service"}, + "paths": []string{path.Join("testdata", "ndjson-parser.journal")}, "parsers": []mapstr.M{ { - "multiline": mapstr.M{ - "type": "count", - "count_lines": 3, + "ndjson": mapstr.M{ + "target": "", }, }, }, }) ctx, cancelInput := context.WithCancel(context.Background()) + t.Cleanup(cancelInput) env.startInput(ctx, inp) - env.waitUntilEventCount(len(inputParsersExpected)) + env.waitUntilEventCount(1) + event := env.pipeline.clients[0].GetEvents()[0] + + foo, isString := event.Fields["foo"].(string) + if !isString { + t.Errorf("expecting field 'foo' to be string, got %T", event.Fields["foo"]) + } - for idx, event := range env.pipeline.clients[0].GetEvents() { - if got, expected := event.Fields["message"], inputParsersExpected[idx]; got != expected { - t.Errorf("expecting event message %q, got %q", expected, got) - } + answer, isInt := event.Fields["answer"].(int64) + if !isInt { + t.Errorf("expecting field 'answer' to be int64, got %T", event.Fields["answer"]) } - cancelInput() + // The JSON in the test journal is: '{"foo": "bar", "answer":42}' + expectedFoo := "bar" + expectedAnswer := int64(42) + if foo != expectedFoo { + t.Errorf("expecting 'foo' from the Journal JSON to be '%s' got '%s' instead", expectedFoo, foo) + } + if answer != expectedAnswer { + t.Errorf("expecting 'answer' from the Journal JSON to be '%d' got '%d' instead", expectedAnswer, answer) + } } diff --git a/filebeat/input/journald/input_test.go b/filebeat/input/journald/input_test.go index 09dd8d1a485..b82663c5262 100644 --- a/filebeat/input/journald/input_test.go +++ b/filebeat/input/journald/input_test.go @@ -39,59 +39,19 @@ import ( "github.com/elastic/elastic-agent-libs/mapstr" ) -// How to write to journal from CLI: -// https://www.baeldung.com/linux/systemd-journal-message-terminal +func TestInputCanReadAllBoots(t *testing.T) { + env := newInputTestingEnvironment(t) + cfg 
:= mapstr.M{ + "paths": []string{path.Join("testdata", "multiple-boots.journal")}, + } + inp := env.mustCreateInput(cfg) -// TestGenerateJournalEntries generates entries in the user's journal. -// It is kept commented out at the top of the file as reference and -// easy access. -// -// How to generate a journal file with only the entries you want: -// 1. Add the dependencies for this test -// go get github.com/ssgreg/journald -// 2. Uncomment and run the test: -// 3. Add the following import: -// journaldlogger "github.com/ssgreg/journald" -// 4. Get a VM, ssh into it, make sure you can access the test from it -// 5. Find the journal file, usually at /var/log/journal//user-1000.journal -// 7. Clean and rotate the journal -// sudo journalctl --vacuum-time=1s -// sudo journalctl --rotate -// 8. Run this test: `go test -run=TestGenerateJournalEntries` -// 9. Copy the journal file somewhere else -// cp /var/log/journal/21282bcb80a74c08a0d14a047372256c/user-1000.journal /tmp/foo.journal -// 10. Read the journal file: -// journalctl --file=/tmp/foo.journal -n 10 -// 11. Read the journal with all fields as JSON -// journalctl --file=/tmp/foo.journal -n 10 -o json -// func TestGenerateJournalEntries(t *testing.T) { -// fields := []map[string]any{ -// { -// "BAR": "bar", -// }, -// { -// "FOO": "foo", -// }, -// { -// "BAR": "bar", -// "FOO": "foo", -// }, -// { -// "FOO_BAR": "foo", -// }, -// { -// "FOO_BAR": "bar", -// }, -// { -// "FOO_BAR": "foo bar", -// }, -// } -// for i, m := range fields { -// if err := journaldlogger.Send(fmt.Sprintf("message %d", i), journaldlogger.PriorityInfo, m); err != nil { -// t.Fatal(err) -// } -// } -// } + ctx, cancelInput := context.WithCancel(context.Background()) + t.Cleanup(cancelInput) + + env.startInput(ctx, inp) + env.waitUntilEventCount(6) +} func TestInputFieldsTranslation(t *testing.T) { // A few random keys to verify diff --git a/filebeat/input/journald/pkg/journalctl/jctlmock_test.go b/filebeat/input/journald/pkg/journalctl/jctlmock_test.go index c9244a5fa43..9fed391de5e 100644 --- a/filebeat/input/journald/pkg/journalctl/jctlmock_test.go +++ b/filebeat/input/journald/pkg/journalctl/jctlmock_test.go @@ -18,6 +18,8 @@ // Code generated by moq; DO NOT EDIT. // github.com/matryer/moq +//go:build linux + package journalctl import ( @@ -39,7 +41,7 @@ var _ Jctl = &JctlMock{} // KillFunc: func() error { // panic("mock out the Kill method") // }, -// NextFunc: func(canceler input.Canceler) ([]byte, error) { +// NextFunc: func(canceler input.Canceler) ([]byte, bool, error) { // panic("mock out the Next method") // }, // } @@ -53,7 +55,7 @@ type JctlMock struct { KillFunc func() error // NextFunc mocks the Next method. - NextFunc func(canceler input.Canceler) ([]byte, error) + NextFunc func(canceler input.Canceler) ([]byte, bool, error) // calls tracks calls to the methods. calls struct { @@ -98,7 +100,7 @@ func (mock *JctlMock) KillCalls() []struct { } // Next calls NextFunc. 
-func (mock *JctlMock) Next(canceler input.Canceler) ([]byte, error) {
+func (mock *JctlMock) Next(canceler input.Canceler) ([]byte, bool, error) {
if mock.NextFunc == nil {
panic("JctlMock.NextFunc: method is nil but Jctl.Next was just called")
}
diff --git a/filebeat/input/journald/pkg/journalctl/journalctl.go b/filebeat/input/journald/pkg/journalctl/journalctl.go
index 54bcb208b82..b015b896e3d 100644
--- a/filebeat/input/journald/pkg/journalctl/journalctl.go
+++ b/filebeat/input/journald/pkg/journalctl/journalctl.go
@@ -15,6 +15,8 @@
// specific language governing permissions and limitations
// under the License.
+//go:build linux
+
package journalctl
import (
@@ -22,8 +24,10 @@ import (
"errors"
"fmt"
"io"
+ "io/fs"
"os/exec"
"strings"
+ "sync"
input "github.com/elastic/beats/v7/filebeat/input/v2"
"github.com/elastic/elastic-agent-libs/logp"
@@ -37,6 +41,7 @@ type journalctl struct {
logger *logp.Logger
canceler input.Canceler
+ waitDone sync.WaitGroup
}
// Factory returns an instance of journalctl ready to use.
@@ -95,7 +100,31 @@ func Factory(canceller input.Canceler, logger *logp.Logger, binary string, args
data, err := reader.ReadBytes('\n')
if err != nil {
if !errors.Is(err, io.EOF) {
- logger.Errorf("cannot read from journalctl stdout: %s", err)
+ var logError = false
+ var pathError *fs.PathError
+ if errors.As(err, &pathError) {
+ // Because we're reading from the stdout of a process that will
+ // eventually exit, we can get the fs.PathError below instead of
+ // an io.EOF. This is expected: it only means the process has
+ // exited, its stdout has been closed, and there is nothing else
+ // for us to read. No data is lost.
+ // We log it at debug level so the information is available if
+ // ever needed, while avoiding error-level logs on users'
+ // deployments for a situation that is well handled.
+ if pathError.Op == "read" &&
+ pathError.Path == "|0" &&
+ pathError.Err.Error() == "file already closed" {
+ logger.Debugf("cannot read from journalctl stdout: '%s'", err)
+ } else {
+ logError = true
+ }
+ } else {
+ logError = true
+ }
+ if logError {
+ logger.Errorf("cannot read from journalctl stdout: '%s'", err)
+ }
}
return
}
@@ -118,10 +147,13 @@ func Factory(canceller input.Canceler, logger *logp.Logger, binary string, args
// Whenever the journalctl process exits, the `Wait` call returns;
// if there was an error, it is logged and this goroutine exits.
+ jctl.waitDone.Add(1)
go func() {
+ defer jctl.waitDone.Done()
if err := cmd.Wait(); err != nil {
jctl.logger.Errorf("journalctl exited with an error, exit code %d", cmd.ProcessState.ExitCode())
}
+ jctl.logger.Debugf("journalctl exit code: %d", cmd.ProcessState.ExitCode())
}()
return &jctl, nil
@@ -130,18 +162,31 @@ func Factory(canceller input.Canceler, logger *logp.Logger, binary string, args
// Kill terminates the journalctl process using a SIGKILL.
func (j *journalctl) Kill() error {
j.logger.Debug("sending SIGKILL to journalctl")
- err := j.cmd.Process.Kill()
- return err
+ return j.cmd.Process.Kill()
}
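// Editor's note: the stdout-error classification above is subtle, so here is
// a runnable, self-contained version of the same check. The triple
// (Op == "read", Path == "|0", "file already closed") identifies the benign
// error produced when a child process exits and its closed stdout pipe is
// read once more.
package main

import (
	"errors"
	"fmt"
	"io/fs"
)

// benignStdoutClose reports whether err only signals that the child's
// stdout pipe was closed because the process exited.
func benignStdoutClose(err error) bool {
	var pathError *fs.PathError
	if !errors.As(err, &pathError) {
		return false
	}
	return pathError.Op == "read" &&
		pathError.Path == "|0" &&
		pathError.Err.Error() == "file already closed"
}

func main() {
	closed := &fs.PathError{Op: "read", Path: "|0", Err: errors.New("file already closed")}
	fmt.Println(benignStdoutClose(closed))                 // true: log at debug level
	fmt.Println(benignStdoutClose(errors.New("bad read"))) // false: log at error level
}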
-func (j *journalctl) Next(cancel input.Canceler) ([]byte, error) {
+// Next returns the next journal entry (as JSON). If `finished` is true, then
+// journalctl finished returning all data and exited successfully. If
+// journalctl exited unexpectedly, then `err` is non-nil, `finished` is false,
+// and an empty byte slice is returned.
+func (j *journalctl) Next(cancel input.Canceler) ([]byte, bool, error) {
select {
case <-cancel.Done():
- return []byte{}, ErrCancelled
+ return []byte{}, false, ErrCancelled
case d, open := <-j.dataChan:
if !open {
- return []byte{}, errors.New("no more data to read, journalctl might have exited unexpectedly")
+ // Wait for the process to exit, so we can read the exit code.
+ j.waitDone.Wait()
+ if j.cmd.ProcessState.ExitCode() == 0 {
+ return []byte{}, true, nil
+ }
+ return []byte{},
+ false,
+ fmt.Errorf(
+ "no more data to read, journalctl exited unexpectedly, exit code: %d",
+ j.cmd.ProcessState.ExitCode())
}
- return d, nil
+
+ return d, false, nil
}
}
diff --git a/filebeat/input/journald/pkg/journalctl/mode.go b/filebeat/input/journald/pkg/journalctl/mode.go
index 5f0c60386b2..ac61bb55458 100644
--- a/filebeat/input/journald/pkg/journalctl/mode.go
+++ b/filebeat/input/journald/pkg/journalctl/mode.go
@@ -15,6 +15,8 @@
// specific language governing permissions and limitations
// under the License.
+//go:build linux
+
package journalctl
import "fmt"
diff --git a/filebeat/input/journald/pkg/journalctl/mode_test.go b/filebeat/input/journald/pkg/journalctl/mode_test.go
index 9e63a3169d0..545ff08207f 100644
--- a/filebeat/input/journald/pkg/journalctl/mode_test.go
+++ b/filebeat/input/journald/pkg/journalctl/mode_test.go
@@ -15,6 +15,8 @@
// specific language governing permissions and limitations
// under the License.
+//go:build linux
+
package journalctl
import (
diff --git a/filebeat/input/journald/pkg/journalctl/reader.go b/filebeat/input/journald/pkg/journalctl/reader.go
index b530e942b23..c654a17dfdf 100644
--- a/filebeat/input/journald/pkg/journalctl/reader.go
+++ b/filebeat/input/journald/pkg/journalctl/reader.go
@@ -15,6 +15,8 @@
// specific language governing permissions and limitations
// under the License.
+//go:build linux
+
package journalctl
import (
@@ -58,10 +60,25 @@ type JctlFactory func(canceller input.Canceler, logger *logp.Logger, binary stri
//
//go:generate moq --fmt gofmt -out jctlmock_test.go . Jctl
type Jctl interface {
- Next(input.Canceler) ([]byte, error)
+ // Next returns the next journal entry. If there is no entry available,
+ // Next blocks until there is an entry or cancel is cancelled.
+ //
+ // If cancel is cancelled, Next returns an empty byte slice
+ // and ErrCancelled.
+ //
+ // If finished is true, then journalctl returned all messages
+ // and exited successfully.
Next(input.Canceler) (data []byte, finished bool, err error)
Kill() error
}
+type readerState uint8
+
+const (
+ readingOldEntriesState readerState = iota
+ followingState
+)
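// Editor's note: a sketch of how a caller is expected to drive the new
// (data, finished, err) contract; fakeJctl is invented for illustration and
// mimics journalctl dumping two entries and then exiting successfully.
package main

import "fmt"

type fakeJctl struct{ n int }

func (f *fakeJctl) Next() ([]byte, bool, error) {
	if f.n >= 2 {
		return nil, true, nil // clean EOF: all old entries were read
	}
	f.n++
	return []byte(fmt.Sprintf(`{"MESSAGE":"entry %d"}`, f.n)), false, nil
}

func main() {
	j := &fakeJctl{}
	for {
		data, finished, err := j.Next()
		if err != nil {
			fmt.Println("unexpected exit, restart in the same mode:", err)
			return
		}
		if finished {
			fmt.Println("clean exit, restart journalctl with --follow")
			return
		}
		fmt.Printf("entry: %s\n", data)
	}
}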
// Reader reads entries from journald by calling `journalctl`
// and reading its output.
//
@@ -74,36 +91,54 @@ type Jctl interface {
// More details can be found in the PR introducing this feature and related
// issues. PR: https://github.com/elastic/beats/pull/40061.
type Reader struct {
- args []string
+ // logger is the logger for the reader
+ logger *logp.Logger
+
+ // jctlLogger is the logger for the code controlling
+ // the journalctl process
+ jctlLogger *logp.Logger
+
+ // args are arguments for journalctl that never change,
+ // like the message filters, format, etc.
+ args []string
+
+ // firstRunArgs are the arguments used in the first call to
+ // journalctl that will be replaced by the cursor argument
+ // once data has been ingested
+ firstRunArgs []string
+
+ // cursor is the journalctl cursor; it is also stored in Filebeat's registry
cursor string
- logger *logp.Logger
canceler input.Canceler
jctl Jctl
jctlFactory JctlFactory
backoff backoff.Backoff
+ state readerState
}
// handleSeekAndCursor returns the correct arguments for seek and cursor.
// If there is a cursor, only the cursor is used, seek is ignored.
// If there is no cursor, then seek is used.
-func handleSeekAndCursor(mode SeekMode, since time.Duration, cursor string) []string {
+// The returned bool indicates whether there might be messages from
+// previous boots.
+func handleSeekAndCursor(mode SeekMode, since time.Duration, cursor string) ([]string, bool) {
if cursor != "" {
- return []string{"--after-cursor", cursor}
+ return []string{"--after-cursor", cursor}, true
}
switch mode {
case SeekSince:
- return []string{"--since", time.Now().Add(since).Format(sinceTimeFormat)}
+ return []string{"--since", time.Now().Add(since).Format(sinceTimeFormat)}, true
case SeekTail:
- return []string{"--since", "now"}
+ return []string{"--since", "now"}, false
case SeekHead:
- return []string{"--no-tail"}
+ return []string{"--no-tail"}, true
default:
// That should never happen
- return []string{}
+ return []string{}, false
}
}
@@ -146,7 +181,9 @@ func New(
) (*Reader, error) {
logger = logger.Named("reader")
- args := []string{"--utc", "--output=json", "--follow"}
+
+ args := []string{"--utc", "--output=json", "--no-pager"}
+
if file != "" && file != localSystemJournalID {
args = append(args, "--file", file)
}
@@ -171,26 +208,43 @@ func New(
args = append(args, "--facility", fmt.Sprintf("%d", facility))
}
- otherArgs := handleSeekAndCursor(mode, since, cursor)
-
- jctl, err := newJctl(canceler, logger.Named("journalctl-runner"), "journalctl", append(args, otherArgs...)...)
- if err != nil {
- return &Reader{}, err
+ firstRunArgs, prevBoots := handleSeekAndCursor(mode, since, cursor)
+ state := readingOldEntriesState // Initial state
+ if !prevBoots {
+ state = followingState
}
r := Reader{
- args: args,
- cursor: cursor,
- jctl: jctl,
- logger: logger,
+ logger: logger,
+ jctlLogger: logger.Named("journalctl-runner"),
+
+ args: args,
+ firstRunArgs: firstRunArgs,
+
+ state: state,
+ cursor: cursor,
+
canceler: canceler,
jctlFactory: newJctl,
backoff: backoff.NewExpBackoff(canceler.Done(), 100*time.Millisecond, 2*time.Second),
}
+ if err := r.newJctl(firstRunArgs...); err != nil {
+ return &Reader{}, err
+ }
+
return &r, nil
}
+func (r *Reader) newJctl(extraArgs ...string) error {
+ args := append(r.args, extraArgs...)
+
+ jctl, err := r.jctlFactory(r.canceler, r.jctlLogger, "journalctl", args...)
+ r.jctl = jctl
+
+ return err
+}
+
// Close stops the `journalctl` process and waits for all
// goroutines to return; the canceller passed to `New` should
// be cancelled before `Close` is called
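// Editor's note: the reader is now a two-state machine. A toy version of the
// decision implemented by handleSeekAndCursor and New, with the argument
// handling reduced to the three cases that matter:
package main

import "fmt"

type state uint8

const (
	readingOld state = iota // journalctl may still return entries from previous boots
	following               // journalctl tails only new entries
)

// initialMode mirrors the rule above: a cursor or a head/since start may
// cover previous boots; "--since now" (tail) never does.
func initialMode(cursor string, tail bool) ([]string, state) {
	switch {
	case cursor != "":
		return []string{"--after-cursor", cursor}, readingOld
	case tail:
		return []string{"--since", "now"}, following
	default:
		return []string{"--no-tail"}, readingOld
	}
}

func main() {
	args, st := initialMode("", true)
	fmt.Println(args, st == following) // [--since now] true
}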
@@ -210,78 +264,144 @@
// If cancel is cancelled, Next returns a zero value JournalEntry
// and ErrCancelled.
func (r *Reader) Next(cancel input.Canceler) (JournalEntry, error) {
- d, err := r.jctl.Next(cancel)
+ msg, finished, err := r.jctl.Next(cancel)
// Check if the input has been cancelled
select {
case <-cancel.Done():
- // Input has been cancelled, ignore the message?
- return JournalEntry{}, err
+ // The caller is responsible for calling Reader.Close to terminate
+ // journalctl. Cancelling this canceller only means this Next call was
+ // cancelled. Because the input has been cancelled, we ignore the message
+ // and any error it might have returned.
+ return JournalEntry{}, ErrCancelled
default:
- // Two options:
- // - No error, go parse the message
- // - Error, journalctl is not running any more, restart it
+ // Three options:
+ // - Journalctl finished reading messages from previous boots
+ // successfully, restart it with the --follow flag.
+ // - Error, journalctl exited with an error, restart it in the same
+ // mode it was running.
+ // - No error, skip the default block and go parse the message
+
+ var extraArgs []string
+ var restart bool
+
+ // First of all: handle the error, if any
if err != nil {
r.logger.Warnf("reader error: '%s', restarting...", err)
- // Copy r.args and if needed, add the cursor flag
- args := append([]string{}, r.args...)
- if r.cursor != "" {
- args = append(args, "--after-cursor", r.cursor)
+ restart = true
+
+ if r.cursor == "" && r.state == readingOldEntriesState {
+ // Corner case: journalctl exited with an error before reading the
+ // 1st message. This means we don't have a cursor and need to restart
+ // it with the initial arguments.
+ extraArgs = append(extraArgs, r.firstRunArgs...)
+ } else if r.cursor != "" {
+ // There is a cursor, so just append it to our arguments
+ extraArgs = append(extraArgs, "--after-cursor", r.cursor)
+
+ // Last, but not least, add "--follow" if we're in following mode
+ if r.state == followingState {
+ extraArgs = append(extraArgs, "--follow")
+ }
}
+ // Handle backoff
+ //
// If the last restart (if any) was more than 5s ago,
// recreate the backoff and do not wait.
// We recreate the backoff so r.backoff.Last().IsZero()
// will return true next time it's called, making us
// wait in case journalctl crashes in less than 5s.
- if !r.backoff.Last().IsZero() && time.Now().Sub(r.backoff.Last()) > 5*time.Second {
+ if !r.backoff.Last().IsZero() && time.Since(r.backoff.Last()) > 5*time.Second {
r.backoff = backoff.NewExpBackoff(cancel.Done(), 100*time.Millisecond, 2*time.Second)
} else {
r.backoff.Wait()
}
+ }
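// Editor's note: the backoff rule above can be read as "crashes more than 5s
// apart are treated as isolated". A standalone model of that policy (the
// real code uses libbeat's backoff.ExpBackoff; this is only an illustration):
package main

import (
	"fmt"
	"time"
)

// nextWait returns how long to wait before the next journalctl restart.
func nextWait(lastRestart time.Time, wait time.Duration) time.Duration {
	if !lastRestart.IsZero() && time.Since(lastRestart) > 5*time.Second {
		return 100 * time.Millisecond // quiet period: restart from the initial wait
	}
	wait *= 2 // crashes close together: keep growing exponentially
	if wait > 2*time.Second {
		wait = 2 * time.Second // ...up to the 2s cap used by the reader
	}
	return wait
}

func main() {
	fmt.Println(nextWait(time.Now().Add(-10*time.Second), 1600*time.Millisecond))      // 100ms
	fmt.Println(nextWait(time.Now().Add(-200*time.Millisecond), 200*time.Millisecond)) // 400ms
}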
return JournalEntry{}, fmt.Errorf("cannot restart journalctl: %w", err) } - r.jctl = jctl // Return an empty message and wait for the input to call us again return JournalEntry{}, ErrRestarting } } + return r.handleMessage(msg) +} + +func (r *Reader) handleMessage(msg []byte) (JournalEntry, error) { fields := map[string]any{} - if err := json.Unmarshal(d, &fields); err != nil { - r.logger.Error("journal event cannot be parsed as map[string]any, look at the events log file for the raw journal event") + if err := json.Unmarshal(msg, &fields); err != nil { + r.logger.Error("journal event cannot be parsed as map[string]any, " + + "look at the events log file for the raw journal event") + // Log raw data to events log file - msg := fmt.Sprintf("data cannot be parsed as map[string]any JSON: '%s'", string(d)) - r.logger.Errorw(msg, logp.TypeKey, logp.EventType, "error.message", err.Error()) + msg := fmt.Sprintf("data cannot be parsed as map[string]any. Data: '%s'", + string(msg)) + r.logger.Errorw( + msg, + "error.message", err.Error(), + logp.TypeKey, logp.EventType) + return JournalEntry{}, fmt.Errorf("cannot decode Journald JSON: %w", err) } ts, isString := fields["__REALTIME_TIMESTAMP"].(string) if !isString { - return JournalEntry{}, fmt.Errorf("'__REALTIME_TIMESTAMP': '%[1]v', type %[1]T is not a string", fields["__REALTIME_TIMESTAMP"]) + return JournalEntry{}, + fmt.Errorf("'__REALTIME_TIMESTAMP': '%[1]v', type %[1]T is not a string", + fields["__REALTIME_TIMESTAMP"]) } unixTS, err := strconv.ParseUint(ts, 10, 64) if err != nil { - return JournalEntry{}, fmt.Errorf("could not convert '__REALTIME_TIMESTAMP' to uint64: %w", err) + return JournalEntry{}, + fmt.Errorf("could not convert '__REALTIME_TIMESTAMP' to uint64: %w", + err) } monotomicTs, isString := fields["__MONOTONIC_TIMESTAMP"].(string) if !isString { - return JournalEntry{}, fmt.Errorf("'__MONOTONIC_TIMESTAMP': '%[1]v', type %[1]T is not a string", fields["__MONOTONIC_TIMESTAMP"]) + return JournalEntry{}, + fmt.Errorf("'__MONOTONIC_TIMESTAMP': '%[1]v', type %[1]T is not a string", + fields["__MONOTONIC_TIMESTAMP"]) } monotonicTSInt, err := strconv.ParseUint(monotomicTs, 10, 64) if err != nil { - return JournalEntry{}, fmt.Errorf("could not convert '__MONOTONIC_TIMESTAMP' to uint64: %w", err) + return JournalEntry{}, + fmt.Errorf("could not convert '__MONOTONIC_TIMESTAMP' to uint64: %w", + err) } cursor, isString := fields["__CURSOR"].(string) if !isString { - return JournalEntry{}, fmt.Errorf("'_CURSOR': '%[1]v', type %[1]T is not a string", fields["_CURSOR"]) + return JournalEntry{}, + fmt.Errorf("'_CURSOR': '%[1]v', type %[1]T is not a string", + fields["_CURSOR"]) } // Update our cursor so we can restart journalctl if needed diff --git a/filebeat/input/journald/pkg/journalctl/reader_test.go b/filebeat/input/journald/pkg/journalctl/reader_test.go index af3837fd09c..4690d9cdf83 100644 --- a/filebeat/input/journald/pkg/journalctl/reader_test.go +++ b/filebeat/input/journald/pkg/journalctl/reader_test.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. 
+//go:build linux + package journalctl import ( @@ -48,8 +50,8 @@ func TestEventWithNonStringData(t *testing.T) { for idx, rawEvent := range testCases { t.Run(fmt.Sprintf("test %d", idx), func(t *testing.T) { mock := JctlMock{ - NextFunc: func(canceler input.Canceler) ([]byte, error) { - return rawEvent, nil + NextFunc: func(canceler input.Canceler) ([]byte, bool, error) { + return rawEvent, false, nil }, } r := Reader{ @@ -72,8 +74,8 @@ func TestRestartsJournalctlOnError(t *testing.T) { ctx := context.Background() mock := JctlMock{ - NextFunc: func(canceler input.Canceler) ([]byte, error) { - return jdEvent, errors.New("journalctl exited with code 42") + NextFunc: func(canceler input.Canceler) ([]byte, bool, error) { + return jdEvent, false, errors.New("journalctl exited with code 42") }, } @@ -90,8 +92,8 @@ func TestRestartsJournalctlOnError(t *testing.T) { // If calls have been made, change the Next function to always succeed // and return it - mock.NextFunc = func(canceler input.Canceler) ([]byte, error) { - return jdEvent, nil + mock.NextFunc = func(canceler input.Canceler) ([]byte, bool, error) { + return jdEvent, false, nil } return &mock, nil diff --git a/filebeat/input/journald/pkg/journalfield/conv.go b/filebeat/input/journald/pkg/journalfield/conv.go index a7a6994c0bb..4c7575114d2 100644 --- a/filebeat/input/journald/pkg/journalfield/conv.go +++ b/filebeat/input/journald/pkg/journalfield/conv.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +//go:build linux + package journalfield import ( diff --git a/filebeat/input/journald/pkg/journalfield/default_other.go b/filebeat/input/journald/pkg/journalfield/default_other.go deleted file mode 100644 index 1d645e162a0..00000000000 --- a/filebeat/input/journald/pkg/journalfield/default_other.go +++ /dev/null @@ -1,46 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -//go:build !linux - -package journalfield - -// journaldEventFields provides default field mappings and conversions rules. 
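// Editor's note: TestRestartsJournalctlOnError above relies on a mock whose
// NextFunc is swapped after the first failure. The technique in isolation:
package main

import (
	"errors"
	"fmt"
)

type mock struct{ next func() ([]byte, bool, error) }

func main() {
	m := &mock{}
	m.next = func() ([]byte, bool, error) {
		// Fail once, then swap in an always-succeeding implementation so the
		// code under test observes exactly one crash followed by recovery.
		m.next = func() ([]byte, bool, error) { return []byte(`{}`), false, nil }
		return nil, false, errors.New("journalctl exited with code 42")
	}

	for i := 0; i < 3; i++ {
		data, finished, err := m.next()
		fmt.Println(i, string(data), finished, err)
	}
}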
-var journaldEventFields = FieldConversion{ - // provided by systemd journal - "COREDUMP_UNIT": text("journald.coredump.unit"), - "COREDUMP_USER_UNIT": text("journald.coredump.user_unit"), - "OBJECT_AUDIT_LOGINUID": integer("journald.object.audit.login_uid"), - "OBJECT_AUDIT_SESSION": integer("journald.object.audit.session"), - "OBJECT_CMDLINE": text("journald.object.process.command_line"), - "OBJECT_COMM": text("journald.object.process.name"), - "OBJECT_EXE": text("journald.object.process.executable"), - "OBJECT_GID": integer("journald.object.gid"), - "OBJECT_PID": integer("journald.object.pid"), - "OBJECT_SYSTEMD_OWNER_UID": integer("journald.object.systemd.owner_uid"), - "OBJECT_SYSTEMD_SESSION": text("journald.object.systemd.session"), - "OBJECT_SYSTEMD_UNIT": text("journald.object.systemd.unit"), - "OBJECT_SYSTEMD_USER_UNIT": text("journald.object.systemd.user_unit"), - "OBJECT_UID": integer("journald.object.uid"), - "_KERNEL_DEVICE": text("journald.kernel.device"), - "_KERNEL_SUBSYSTEM": text("journald.kernel.subsystem"), - "_SYSTEMD_INVOCATION_ID": text("systemd.invocation_id"), - "_SYSTEMD_USER_SLICE": text("systemd.user_slice"), - "_UDEV_DEVLINK": text("journald.kernel.device_symlinks"), - "_UDEV_DEVNODE": text("journald.kernel.device_node_path"), - "_UDEV_SYSNAME": text("journald.kernel.device_name"), -} diff --git a/filebeat/input/journald/pkg/journalfield/matcher.go b/filebeat/input/journald/pkg/journalfield/matcher.go index 07d4e6ba753..8f44579f263 100644 --- a/filebeat/input/journald/pkg/journalfield/matcher.go +++ b/filebeat/input/journald/pkg/journalfield/matcher.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +//go:build linux + package journalfield import ( diff --git a/filebeat/input/journald/testdata/multiple-boots.export b/filebeat/input/journald/testdata/multiple-boots.export new file mode 100644 index 00000000000..91e5488470b --- /dev/null +++ b/filebeat/input/journald/testdata/multiple-boots.export @@ -0,0 +1,86 @@ +__CURSOR=s=8c7196499b954413a742eb1e2107fa5d;i=1;b=0ffe5f74a4bd49ca8597eb05fe1a512a;m=39f445;t=6225212a5b6da;x=3f056d2626450d83 +__REALTIME_TIMESTAMP=1726585755776730 +__MONOTONIC_TIMESTAMP=3798085 +_BOOT_ID=0ffe5f74a4bd49ca8597eb05fe1a512a +_SOURCE_MONOTONIC_TIMESTAMP=0 +_TRANSPORT=kernel +PRIORITY=5 +SYSLOG_FACILITY=0 +SYSLOG_IDENTIFIER=kernel +MESSAGE=Linux version 6.1.0-25-amd64 (debian-kernel@lists.debian.org) (gcc-12 (Debian 12.2.0-14) 12.2.0, GNU ld (GNU Binutils for Debian) 2.40) #1 SMP PREEMPT_DYNAMIC Debian 6.1.106-3 (2024-08-26) +_MACHINE_ID=ad88a1859979427ea1a7c24f0ae0320a +_HOSTNAME=Debian12 +_RUNTIME_SCOPE=system + +__CURSOR=s=8c7196499b954413a742eb1e2107fa5d;i=2;b=0ffe5f74a4bd49ca8597eb05fe1a512a;m=39f452;t=6225212a5b6e7;x=67b36f81fa43ba68 +__REALTIME_TIMESTAMP=1726585755776743 +__MONOTONIC_TIMESTAMP=3798098 +_BOOT_ID=0ffe5f74a4bd49ca8597eb05fe1a512a +_SOURCE_MONOTONIC_TIMESTAMP=0 +_TRANSPORT=kernel +SYSLOG_FACILITY=0 +SYSLOG_IDENTIFIER=kernel +_MACHINE_ID=ad88a1859979427ea1a7c24f0ae0320a +_HOSTNAME=Debian12 +_RUNTIME_SCOPE=system +PRIORITY=6 +MESSAGE=Command line: BOOT_IMAGE=/boot/vmlinuz-6.1.0-25-amd64 root=UUID=3841998b-4e88-4231-93c8-3fc24b549223 ro quiet + +Sep 17 11:26:36 Debian12 kernel: Linux version 6.1.0-25-amd64 (debian-kernel@lists.debian.org) (gcc-12 (Debian 12.2.0-14) 12.2.0, GNU ld (GNU Binutils for Debian) 2.40) #1 SMP PREEMPT_DYNAMIC Debian 6.1.106-3 (2024-08-26) +Sep 17 11:26:36 Debian12 kernel: Command line: BOOT_IMAGE=/boot/vmlinuz-6.1.0-25-amd64 
root=UUID=3841998b-4e88-4231-93c8-3fc24b549223 ro quiet +__CURSOR=s=8c7196499b954413a742eb1e2107fa5d;i=22e3;b=457105b2d84547a4b4549f0eaa700b61;m=35bc29;t=6227ecec5b11f;x=a46eaad8c3930985 +__REALTIME_TIMESTAMP=1726777890550047 +__MONOTONIC_TIMESTAMP=3521577 +_BOOT_ID=457105b2d84547a4b4549f0eaa700b61 +_SOURCE_MONOTONIC_TIMESTAMP=0 +_TRANSPORT=kernel +PRIORITY=5 +SYSLOG_FACILITY=0 +SYSLOG_IDENTIFIER=kernel +MESSAGE=Linux version 6.1.0-25-amd64 (debian-kernel@lists.debian.org) (gcc-12 (Debian 12.2.0-14) 12.2.0, GNU ld (GNU Binutils for Debian) 2.40) #1 SMP PREEMPT_DYNAMIC Debian 6.1.106-3 (2024-08-26) +_MACHINE_ID=ad88a1859979427ea1a7c24f0ae0320a +_HOSTNAME=Debian12 +_RUNTIME_SCOPE=system + +__CURSOR=s=8c7196499b954413a742eb1e2107fa5d;i=22e4;b=457105b2d84547a4b4549f0eaa700b61;m=35bc37;t=6227ecec5b12d;x=fcd8a87f1f95be6e +__REALTIME_TIMESTAMP=1726777890550061 +__MONOTONIC_TIMESTAMP=3521591 +_BOOT_ID=457105b2d84547a4b4549f0eaa700b61 +_SOURCE_MONOTONIC_TIMESTAMP=0 +_TRANSPORT=kernel +SYSLOG_FACILITY=0 +SYSLOG_IDENTIFIER=kernel +_MACHINE_ID=ad88a1859979427ea1a7c24f0ae0320a +_HOSTNAME=Debian12 +_RUNTIME_SCOPE=system +PRIORITY=6 +MESSAGE=Command line: BOOT_IMAGE=/boot/vmlinuz-6.1.0-25-amd64 root=UUID=3841998b-4e88-4231-93c8-3fc24b549223 ro quiet + +__CURSOR=s=8c7196499b954413a742eb1e2107fa5d;i=451d;b=e2fca45429e54522bb2927112eb8e0b5;m=2aad67;t=6228fba6fbe98;x=ab82fca7956545cf +__REALTIME_TIMESTAMP=1726850563817112 +__MONOTONIC_TIMESTAMP=2796903 +_BOOT_ID=e2fca45429e54522bb2927112eb8e0b5 +_SOURCE_MONOTONIC_TIMESTAMP=0 +_TRANSPORT=kernel +PRIORITY=5 +SYSLOG_FACILITY=0 +SYSLOG_IDENTIFIER=kernel +MESSAGE=Linux version 6.1.0-25-amd64 (debian-kernel@lists.debian.org) (gcc-12 (Debian 12.2.0-14) 12.2.0, GNU ld (GNU Binutils for Debian) 2.40) #1 SMP PREEMPT_DYNAMIC Debian 6.1.106-3 (2024-08-26) +_MACHINE_ID=ad88a1859979427ea1a7c24f0ae0320a +_HOSTNAME=Debian12 +_RUNTIME_SCOPE=system + +__CURSOR=s=8c7196499b954413a742eb1e2107fa5d;i=451e;b=e2fca45429e54522bb2927112eb8e0b5;m=2aad75;t=6228fba6fbea7;x=f334fe004963f224 +__REALTIME_TIMESTAMP=1726850563817127 +__MONOTONIC_TIMESTAMP=2796917 +_BOOT_ID=e2fca45429e54522bb2927112eb8e0b5 +_SOURCE_MONOTONIC_TIMESTAMP=0 +_TRANSPORT=kernel +SYSLOG_FACILITY=0 +SYSLOG_IDENTIFIER=kernel +_MACHINE_ID=ad88a1859979427ea1a7c24f0ae0320a +_HOSTNAME=Debian12 +_RUNTIME_SCOPE=system +PRIORITY=6 +MESSAGE=Command line: BOOT_IMAGE=/boot/vmlinuz-6.1.0-25-amd64 root=UUID=3841998b-4e88-4231-93c8-3fc24b549223 ro quiet + diff --git a/filebeat/module/system/syslog/test/debian-12.journal b/filebeat/input/journald/testdata/multiple-boots.journal similarity index 99% rename from filebeat/module/system/syslog/test/debian-12.journal rename to filebeat/input/journald/testdata/multiple-boots.journal index f4c01a22c3f..668b82162d6 100644 Binary files a/filebeat/module/system/syslog/test/debian-12.journal and b/filebeat/input/journald/testdata/multiple-boots.journal differ diff --git a/filebeat/input/journald/testdata/ndjson-parser.export b/filebeat/input/journald/testdata/ndjson-parser.export new file mode 100644 index 00000000000..0a24b593f77 Binary files /dev/null and b/filebeat/input/journald/testdata/ndjson-parser.export differ diff --git a/filebeat/module/system/auth/test/debian-12.journal b/filebeat/input/journald/testdata/ndjson-parser.journal similarity index 99% rename from filebeat/module/system/auth/test/debian-12.journal rename to filebeat/input/journald/testdata/ndjson-parser.journal index 3195198e604..aa4aa7960f3 100644 Binary files a/filebeat/module/system/auth/test/debian-12.journal and 
b/filebeat/input/journald/testdata/ndjson-parser.journal differ diff --git a/filebeat/input/systemlogs/input.go b/filebeat/input/systemlogs/input.go index 7badfda760c..eadc5a8565a 100644 --- a/filebeat/input/systemlogs/input.go +++ b/filebeat/input/systemlogs/input.go @@ -20,6 +20,7 @@ package systemlogs import ( "errors" "fmt" + "os" "path/filepath" "github.com/elastic/beats/v7/filebeat/channel" @@ -92,7 +93,7 @@ func PluginV2(logger *logp.Logger, store cursor.StateStore) v2.Plugin { return v2.Plugin{ Name: pluginName, - Stability: feature.Stable, + Stability: feature.Experimental, Deprecated: false, Info: "system-logs input", Doc: "The system-logs input collects system logs on Linux by reading them from journald or traditional log files", @@ -145,10 +146,20 @@ func useJournald(c *conf.C) (bool, error) { if err != nil { return false, fmt.Errorf("cannot resolve glob: %w", err) } - if len(paths) != 0 { - // We found at least one system log file, - // journald will not be used, return early - logger.Info( + + for _, p := range paths { + stat, err := os.Stat(p) + if err != nil { + return false, fmt.Errorf("cannot stat '%s': %w", p, err) + } + + // Ignore directories + if stat.IsDir() { + continue + } + + // We found one file, return early + logger.Infof( "using log input because file(s) was(were) found when testing glob '%s'", g) return false, nil @@ -156,48 +167,9 @@ func useJournald(c *conf.C) (bool, error) { } // if no system log files are found, then use jounrald - return true, nil -} - -func toJournaldConfig(cfg *conf.C) (*conf.C, error) { //nolint:unused // It's used on Linux - newCfg, err := cfg.Child("journald", -1) - if err != nil { - return nil, fmt.Errorf("cannot extract 'journald' block: %w", err) - } - - if _, err := cfg.Remove("journald", -1); err != nil { - return nil, err - } - - if _, err := cfg.Remove("type", -1); err != nil { - return nil, err - } - - if _, err := cfg.Remove("files", -1); err != nil { - return nil, err - } + logger.Info("no files were found, using journald input") - if _, err := cfg.Remove("use_journald", -1); err != nil { - return nil, err - } - - if _, err := cfg.Remove("use_files", -1); err != nil { - return nil, err - } - - if err := newCfg.Merge(cfg); err != nil { - return nil, err - } - - if err := newCfg.SetString("type", -1, "journald"); err != nil { - return nil, fmt.Errorf("cannot set 'type': %w", err) - } - - if err := cfg.SetString("type", -1, pluginName); err != nil { - return nil, fmt.Errorf("cannot set type back to '%s': %w", pluginName, err) - } - - return newCfg, nil + return true, nil } func toFilesConfig(cfg *conf.C) (*conf.C, error) { diff --git a/filebeat/input/systemlogs/input_linux.go b/filebeat/input/systemlogs/input_linux.go index 5a98c270b97..98a59361c0b 100644 --- a/filebeat/input/systemlogs/input_linux.go +++ b/filebeat/input/systemlogs/input_linux.go @@ -47,3 +47,44 @@ func configure(cfg *conf.C) ([]cursor.Source, cursor.Input, error) { return journald.Configure(journaldCfg) } + +func toJournaldConfig(cfg *conf.C) (*conf.C, error) { + newCfg, err := cfg.Child("journald", -1) + if err != nil { + return nil, fmt.Errorf("cannot extract 'journald' block: %w", err) + } + + if _, err := cfg.Remove("journald", -1); err != nil { + return nil, err + } + + if _, err := cfg.Remove("type", -1); err != nil { + return nil, err + } + + if _, err := cfg.Remove("files", -1); err != nil { + return nil, err + } + + if _, err := cfg.Remove("use_journald", -1); err != nil { + return nil, err + } + + if _, err := cfg.Remove("use_files", -1); err != 
nil {
+ return nil, err
+ }
+
+ if err := newCfg.Merge(cfg); err != nil {
+ return nil, err
+ }
+
+ if err := newCfg.SetString("type", -1, "journald"); err != nil {
+ return nil, fmt.Errorf("cannot set 'type': %w", err)
+ }
+
+ if err := cfg.SetString("type", -1, pluginName); err != nil {
+ return nil, fmt.Errorf("cannot set type back to '%s': %w", pluginName, err)
+ }
+
+ return newCfg, nil
+}
diff --git a/filebeat/input/systemlogs/input_linux_test.go b/filebeat/input/systemlogs/input_linux_test.go
new file mode 100644
index 00000000000..251ef6cae67
--- /dev/null
+++ b/filebeat/input/systemlogs/input_linux_test.go
@@ -0,0 +1,54 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build linux
+
+package systemlogs
+
+import (
+ "testing"
+
+ conf "github.com/elastic/elastic-agent-libs/config"
+)
+
+func TestJournaldInputIsCreated(t *testing.T) {
+ c := map[string]any{
+ "files.paths": []string{"/file/does/not/exist"},
+ // The 'journald' object needs to exist for the input to be instantiated
+ "journald.enabled": true,
+ }
+
+ cfg := conf.MustNewConfigFrom(c)
+
+ _, inp, err := configure(cfg)
+ if err != nil {
+ t.Fatalf("did not expect an error calling configure: %s", err)
+ }
+
+ type namer interface {
+ Name() string
+ }
+
+ i, isNamer := inp.(namer)
+ if !isNamer {
+ t.Fatalf("expecting the input to implement Name() string, got '%T' instead", inp)
+ }
+
+ if got, expected := i.Name(), "journald"; got != expected {
+ t.Fatalf("expecting '%s' input, got '%s'", expected, got)
+ }
+}
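// Editor's note: the file-detection logic above (in useJournald), extracted
// into a standalone, runnable form. The behavioural change in this diff is
// the os.Stat call: a glob match that is a directory no longer counts as
// "log files exist".
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// haveLogFiles reports whether any glob matches at least one regular file.
func haveLogFiles(globs []string) (bool, error) {
	for _, g := range globs {
		paths, err := filepath.Glob(g)
		if err != nil {
			return false, fmt.Errorf("cannot resolve glob: %w", err)
		}
		for _, p := range paths {
			stat, err := os.Stat(p)
			if err != nil {
				return false, fmt.Errorf("cannot stat '%s': %w", p, err)
			}
			if stat.IsDir() {
				continue // directories are ignored
			}
			return true, nil // one real file is enough: use the log input
		}
	}
	return false, nil // nothing found: fall back to journald
}

func main() {
	ok, err := haveLogFiles([]string{"/var/log/auth.log*", "/var/log/secure*"})
	fmt.Println(ok, err)
}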
diff --git a/filebeat/input/systemlogs/input_test.go b/filebeat/input/systemlogs/input_test.go
new file mode 100644
index 00000000000..6e5526f1736
--- /dev/null
+++ b/filebeat/input/systemlogs/input_test.go
@@ -0,0 +1,145 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package systemlogs
+
+import (
+ "os"
+ "testing"
+
+ "github.com/elastic/beats/v7/filebeat/channel"
+ "github.com/elastic/beats/v7/filebeat/input"
+ "github.com/elastic/beats/v7/filebeat/input/log"
+ "github.com/elastic/beats/v7/libbeat/beat"
+ conf "github.com/elastic/elastic-agent-libs/config"
+)
+
+func generateFile(t *testing.T) string {
+ // Create a known file for testing; the content is not relevant,
+ // it just needs to exist
+ knownFile, err := os.CreateTemp(t.TempDir(), t.Name()+"knownFile*")
+ if err != nil {
+ t.Fatalf("cannot create temporary file: %s", err)
+ }
+
+ if _, err := knownFile.WriteString("Bowties are cool"); err != nil {
+ t.Fatalf("cannot write to temporary file '%s': %s", knownFile.Name(), err)
+ }
+ knownFile.Close()
+
+ return knownFile.Name()
+}
+
+func TestUseJournald(t *testing.T) {
+ filename := generateFile(t)
+
+ testCases := map[string]struct {
+ cfg map[string]any
+ useJournald bool
+ expectErr bool
+ }{
+ "No files found": {
+ cfg: map[string]any{
+ "files.paths": []string{"/file/does/not/exist"},
+ },
+ useJournald: true,
+ },
+ "File exists": {
+ cfg: map[string]any{
+ "files.paths": []string{filename},
+ },
+ useJournald: false,
+ },
+ "use_journald is true": {
+ cfg: map[string]any{
+ "use_journald": true,
+ "journald": struct{}{},
+ },
+ useJournald: true,
+ },
+ "use_files is true": {
+ cfg: map[string]any{
+ "use_files": true,
+ "journald": nil,
+ "files": struct{}{},
+ },
+ useJournald: false,
+ },
+ "use_journald and use_files are true": {
+ cfg: map[string]any{
+ "use_files": true,
+ "use_journald": true,
+ "journald": struct{}{},
+ },
+ useJournald: false,
+ expectErr: true,
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ cfg := conf.MustNewConfigFrom(tc.cfg)
+
+ useJournald, err := useJournald(cfg)
+ if !tc.expectErr && err != nil {
+ t.Fatalf("did not expect an error calling 'useJournald': %s", err)
+ }
+ if tc.expectErr && err == nil {
+ t.Fatal("expecting an error when calling 'useJournald', got none")
+ }
+
+ if useJournald != tc.useJournald {
+ t.Fatalf("expecting 'useJournald' to be %t, got %t",
+ tc.useJournald, useJournald)
+ }
+ })
+ }
+}
+
+func TestLogInputIsInstantiated(t *testing.T) {
+ filename := generateFile(t)
+ c := map[string]any{
+ "files.paths": []string{filename},
+ }
+
+ cfg := conf.MustNewConfigFrom(c)
+
+ inp, err := newV1Input(cfg, connectorMock{}, input.Context{})
+ if err != nil {
+ t.Fatalf("did not expect an error calling newV1Input: %s", err)
+ }
+ _, isLogInput := inp.(*log.Input)
+ if !isLogInput {
+ t.Fatalf("expecting an instance of *log.Input, got '%T' instead", inp)
+ }
+}
+
+type connectorMock struct{}
+
+func (mock connectorMock) Connect(c *conf.C) (channel.Outleter, error) {
+ return outleterMock{}, nil
+}
+
+func (mock connectorMock) ConnectWith(c *conf.C, clientConfig beat.ClientConfig) (channel.Outleter, error) {
+ return outleterMock{}, nil
+}
+
+type outleterMock struct{}
+
+func (o outleterMock) Close() error { return nil }
+func (o outleterMock) Done() <-chan struct{} { return make(chan struct{}) }
+func (o outleterMock) OnEvent(beat.Event) bool { return false }
diff --git a/filebeat/main_test.go b/filebeat/main_test.go
index 0f989a77a35..b73c88438e6 100644
--- a/filebeat/main_test.go
+++ b/filebeat/main_test.go
@@ -26,6 +26,7 @@ import (
fbcmd "github.com/elastic/beats/v7/filebeat/cmd"
inputs "github.com/elastic/beats/v7/filebeat/input/default-inputs"
+ "github.com/elastic/beats/v7/libbeat/cfgfile"
cmd "github.com/elastic/beats/v7/libbeat/cmd"
"github.com/elastic/beats/v7/libbeat/tests/system/template" ) @@ -40,11 +41,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") fbCommand = fbcmd.Filebeat(inputs.Init, fbcmd.FilebeatSettings("")) fbCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") fbCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { if err := fbCommand.Execute(); err != nil { os.Exit(1) diff --git a/filebeat/module/system/README.md b/filebeat/module/system/README.md deleted file mode 100644 index 2471264cfcf..00000000000 --- a/filebeat/module/system/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Journald tests (Debian 12) -The tests for the journald input (currently only used for Debian 12 -testing) require journal files (test files ending in `.journal`), those -files are generated using `systemd-journal-remote` (see the [Journald -input README.md](../../input/journald/README.md) for more details). - -The source for those journal files are the `.export` files in the test -folder. Those files are the raw output of `journalctl -o export`. They -are added here because journal files format change with different -versions of journald, which can cause `journalclt` to fail reading -them, which leads to test failures. So if tests start failing because -`journalctl` cannot read the journal files as expected, new ones can -easily be generated with the same version of journalctl used on CI -and the original dataset. diff --git a/filebeat/module/system/_meta/config.reference.yml b/filebeat/module/system/_meta/config.reference.yml index 04160dfb1bf..3c7a0b43d49 100644 --- a/filebeat/module/system/_meta/config.reference.yml +++ b/filebeat/module/system/_meta/config.reference.yml @@ -7,18 +7,7 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Force using journald to collect system logs - #var.use_journald: true|false - - # Force using log files to collect system logs - #var.use_files: true|false - - # If use_journald and use_files are false, then - # Filebeat will autodetect whether use to journald - # to collect system logs. - - # Input configuration (advanced). - # Any input configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. #input: @@ -30,23 +19,6 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Force using journald to collect system logs - #var.use_journald: true|false - - # Force using log files to collect system logs - #var.use_files: true|false - - # If use_journald and use_files are false, then - # Filebeat will autodetect whether use to journald - # to collect system logs. - - # A list of tags to include in events. Including 'forwarded' - # indicates that the events did not originate on this host and - # causes host.name to not be added to events. Include - # 'preserve_orginal_event' causes the pipeline to retain the raw log - # in event.original. Defaults to []. - #var.tags: [] - # Input configuration (advanced). Any input configuration option # can be added under this section. 
#input: diff --git a/filebeat/module/system/_meta/config.yml b/filebeat/module/system/_meta/config.yml index f95f3e5969d..c1fe882374d 100644 --- a/filebeat/module/system/_meta/config.yml +++ b/filebeat/module/system/_meta/config.yml @@ -7,16 +7,6 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Force using journald to collect system logs - #var.use_journald: true|false - - # Force using log files to collect system logs - #var.use_files: true|false - - # If use_journald and use_files are false, then - # Filebeat will autodetect whether use to journald - # to collect system logs. - # Authorization logs auth: enabled: false @@ -24,20 +14,3 @@ # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: - - # Force using journald to collect system logs - #var.use_journald: true|false - - # Force using log files to collect system logs - #var.use_files: true|false - - # If use_journald and use_files are false, then - # Filebeat will autodetect whether use to journald - # to collect system logs. - - # A list of tags to include in events. Including forwarded - # indicates that the events did not originate on this host and - # causes host.name to not be added to events. Include - # preserve_orginal_event causes the pipeline to retain the raw log - # in event.original. Defaults to []. - #var.tags: [] diff --git a/filebeat/module/system/_meta/docs.asciidoc b/filebeat/module/system/_meta/docs.asciidoc index 1aaca678963..6d9209eafe2 100644 --- a/filebeat/module/system/_meta/docs.asciidoc +++ b/filebeat/module/system/_meta/docs.asciidoc @@ -16,7 +16,7 @@ include::../include/gs-link.asciidoc[] === Compatibility This module was tested with logs from OSes like Ubuntu 12.04, Centos 7, and -macOS Sierra. For Debian 12 Journald is used to read the system logs. +macOS Sierra. This module is not available for Windows. @@ -58,15 +58,11 @@ include::../include/config-option-intro.asciidoc[] include::../include/var-paths.asciidoc[] -include::../include/use-journald.asciidoc[] - [float] ==== `auth` fileset settings include::../include/var-paths.asciidoc[] -include::../include/use-journald.asciidoc[] - *`var.tags`*:: A list of tags to include in events. 
Including `forwarded` indicates that the diff --git a/filebeat/module/system/auth/config/auth.yml b/filebeat/module/system/auth/config/auth.yml index 3affe320fb0..466b55078af 100644 --- a/filebeat/module/system/auth/config/auth.yml +++ b/filebeat/module/system/auth/config/auth.yml @@ -1,33 +1,17 @@ -type: system-logs -{{ if .use_journald }} -use_journald: true +type: log +paths: +{{ range $i, $path := .paths }} + - {{$path}} {{ end }} +exclude_files: [".gz$"] -{{ if .use_files }} -use_files: true -{{ end }} +multiline: + pattern: "^\\s" + match: after -tags: {{ .tags | tojson }} processors: - add_locale: ~ -publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} - -journald: - id: system-auth - facilities: - - 4 - - 10 - -files: - id: system-auth - paths: - {{ range $i, $path := .paths }} - - {{$path}} - {{ end }} - exclude_files: [".gz$"] - - multiline: - pattern: "^\\s" - match: after +tags: {{ .tags | tojson }} +publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/filebeat/module/system/auth/ingest/entrypoint.yml b/filebeat/module/system/auth/ingest/entrypoint.yml deleted file mode 100644 index 7da5fc4a5d4..00000000000 --- a/filebeat/module/system/auth/ingest/entrypoint.yml +++ /dev/null @@ -1,18 +0,0 @@ -description: Entrypoint Pipeline for system/auth Filebeat module -processors: - - set: - field: event.ingested - copy_from: _ingest.timestamp - - script: - source: | - if(ctx?.journald != null){ - ctx['auth_pipeline'] = '{< IngestPipeline "journald" >}'; - return; - } - ctx['auth_pipeline'] = '{< IngestPipeline "files" >}'; - return; - - pipeline: - name: "{{ auth_pipeline }}" - - remove: - ignore_failure: true - field: "auth_pipeline" diff --git a/filebeat/module/system/auth/ingest/files.yml b/filebeat/module/system/auth/ingest/files.yml deleted file mode 100644 index 557747b6400..00000000000 --- a/filebeat/module/system/auth/ingest/files.yml +++ /dev/null @@ -1,63 +0,0 @@ ---- -description: Pipeline for parsing system authorization and secure logs. -processors: - - rename: - if: ctx.event?.original == null - field: message - target_field: event.original - ignore_missing: true - - grok: - description: Grok the message header. - tag: grok-message-header - field: event.original - pattern_definitions: - GREEDYMULTILINE: '(.|\n)*' - TIMESTAMP: (?:%{TIMESTAMP_ISO8601}|%{SYSLOGTIMESTAMP}) - patterns: - - '^%{TIMESTAMP:system.auth.timestamp} %{SYSLOGHOST:host.hostname}? %{DATA:process.name}(?:\[%{POSINT:process.pid:long}\])?:%{SPACE}%{GREEDYMULTILINE:_temp.message}$' - - pipeline: - description: Grok specific auth messages. - name: '{< IngestPipeline "grok-auth-messages" >}' - on_failure: - - rename: - description: Leave the unmatched content in message. 
- field: _temp.message - target_field: message - - remove: - field: _temp - - pipeline: - name: "{< IngestPipeline "common" >}" - - date: - if: ctx.event?.timezone == null - field: system.auth.timestamp - target_field: '@timestamp' - formats: - - MMM d HH:mm:ss - - MMM dd HH:mm:ss - - ISO8601 - on_failure: - - append: - field: error.message - value: '{{{ _ingest.on_failure_message }}}' - - date: - if: ctx.event?.timezone != null - field: system.auth.timestamp - target_field: '@timestamp' - formats: - - MMM d HH:mm:ss - - MMM dd HH:mm:ss - - ISO8601 - timezone: '{{{ event.timezone }}}' - on_failure: - - append: - field: error.message - value: '{{{ _ingest.on_failure_message }}}' - - remove: - field: system.auth.timestamp - - set: - field: input.type - value: log -on_failure: - - set: - field: error.message - value: '{{{ _ingest.on_failure_message }}}' diff --git a/filebeat/module/system/auth/ingest/grok-auth-messages.yml b/filebeat/module/system/auth/ingest/grok-auth-messages.yml deleted file mode 100644 index fc09abbff5e..00000000000 --- a/filebeat/module/system/auth/ingest/grok-auth-messages.yml +++ /dev/null @@ -1,14 +0,0 @@ -description: Journald Pipeline for system/auth Filebeat module -processors: - - grok: - description: Grok specific auth messages. - tag: grok-specific-messages - field: _temp.message - ignore_missing: true - patterns: - - '^%{DATA:system.auth.ssh.event} %{DATA:system.auth.ssh.method} for (invalid user)?%{DATA:user.name} from %{IPORHOST:source.address} port %{NUMBER:source.port:long} ssh2(: %{GREEDYDATA:system.auth.ssh.signature})?' - - '^%{DATA:system.auth.ssh.event} user %{DATA:user.name} from %{IPORHOST:source.address}' - - '^Did not receive identification string from %{IPORHOST:system.auth.ssh.dropped_ip}' - - '^%{DATA:user.name} :( %{DATA:system.auth.sudo.error} ;)? TTY=%{DATA:system.auth.sudo.tty} ; PWD=%{DATA:system.auth.sudo.pwd} ; USER=%{DATA:system.auth.sudo.user} ; COMMAND=%{GREEDYDATA:system.auth.sudo.command}' - - '^new group: name=%{DATA:group.name}, GID=%{NUMBER:group.id}' - - '^new user: name=%{DATA:user.name}, UID=%{NUMBER:user.id}, GID=%{NUMBER:group.id}, home=%{DATA:system.auth.useradd.home}, shell=%{DATA:system.auth.useradd.shell}$' diff --git a/filebeat/module/system/auth/ingest/journald.yml b/filebeat/module/system/auth/ingest/journald.yml deleted file mode 100644 index bb43dd63cf5..00000000000 --- a/filebeat/module/system/auth/ingest/journald.yml +++ /dev/null @@ -1,33 +0,0 @@ -description: Journald Pipeline for system/auth Filebeat module -processors: - - rename: - field: "journald.process.name" - target_field: process.name - - rename: - field: message - target_field: _temp.message - - pipeline: - description: Grok specific auth messages. 
- name: '{< IngestPipeline "grok-auth-messages" >}' - ignore_failure: true - - rename: - field: _temp.message - target_field: message - - pipeline: - name: "{< IngestPipeline "common" >}" - - remove: - description: Remove the extra fields added by the Journald input - ignore_missing: true - field: - - journald - - process.thread - - syslog - - systemd - - message_id - - set: - field: input.type - value: journald -on_failure: - - set: - field: error.message - value: '{{{ _ingest.on_failure_message }}}' diff --git a/filebeat/module/system/auth/ingest/common.yml b/filebeat/module/system/auth/ingest/pipeline.yml similarity index 67% rename from filebeat/module/system/auth/ingest/common.yml rename to filebeat/module/system/auth/ingest/pipeline.yml index 75c2a8e46a9..c89ef94b28a 100644 --- a/filebeat/module/system/auth/ingest/common.yml +++ b/filebeat/module/system/auth/ingest/pipeline.yml @@ -1,5 +1,42 @@ -description: Common steps for Journald and log files from system/auth Filebeat module +--- +description: Pipeline for parsing system authorization and secure logs. processors: + - set: + field: event.ingested + copy_from: _ingest.timestamp + - rename: + if: ctx.event?.original == null + field: message + target_field: event.original + ignore_missing: true + - grok: + description: Grok the message header. + tag: grok-message-header + field: event.original + pattern_definitions: + GREEDYMULTILINE: '(.|\n)*' + TIMESTAMP: (?:%{TIMESTAMP_ISO8601}|%{SYSLOGTIMESTAMP}) + patterns: + - '^%{TIMESTAMP:system.auth.timestamp} %{SYSLOGHOST:host.hostname}? %{DATA:process.name}(?:\[%{POSINT:process.pid:long}\])?:%{SPACE}%{GREEDYMULTILINE:_temp.message}$' + - grok: + description: Grok specific auth messages. + tag: grok-specific-messages + field: _temp.message + ignore_missing: true + patterns: + - '^%{DATA:system.auth.ssh.event} %{DATA:system.auth.ssh.method} for (invalid user)?%{DATA:user.name} from %{IPORHOST:source.address} port %{NUMBER:source.port:long} ssh2(: %{GREEDYDATA:system.auth.ssh.signature})?' + - '^%{DATA:system.auth.ssh.event} user %{DATA:user.name} from %{IPORHOST:source.address}' + - '^Did not receive identification string from %{IPORHOST:system.auth.ssh.dropped_ip}' + - '^%{DATA:user.name} :( %{DATA:system.auth.sudo.error} ;)? TTY=%{DATA:system.auth.sudo.tty} ; PWD=%{DATA:system.auth.sudo.pwd} ; USER=%{DATA:system.auth.sudo.user} ; COMMAND=%{GREEDYDATA:system.auth.sudo.command}' + - '^new group: name=%{DATA:group.name}, GID=%{NUMBER:group.id}' + - '^new user: name=%{DATA:user.name}, UID=%{NUMBER:user.id}, GID=%{NUMBER:group.id}, home=%{DATA:system.auth.useradd.home}, shell=%{DATA:system.auth.useradd.shell}$' + on_failure: + - rename: + description: Leave the unmatched content in message. + field: _temp.message + target_field: message + - remove: + field: _temp - grok: description: Grok usernames from PAM messages. 
tag: grok-pam-users @@ -70,6 +107,33 @@ processors: on_failure: - remove: field: system.auth.ssh.dropped_ip + - date: + if: ctx.event?.timezone == null + field: system.auth.timestamp + target_field: '@timestamp' + formats: + - MMM d HH:mm:ss + - MMM dd HH:mm:ss + - ISO8601 + on_failure: + - append: + field: error.message + value: '{{{ _ingest.on_failure_message }}}' + - date: + if: ctx.event?.timezone != null + field: system.auth.timestamp + target_field: '@timestamp' + formats: + - MMM d HH:mm:ss + - MMM dd HH:mm:ss + - ISO8601 + timezone: '{{{ event.timezone }}}' + on_failure: + - append: + field: error.message + value: '{{{ _ingest.on_failure_message }}}' + - remove: + field: system.auth.timestamp - geoip: field: source.ip target_field: source.geo @@ -170,3 +234,7 @@ processors: if: "ctx?.tags == null || !(ctx.tags.contains('preserve_original_event'))" ignore_failure: true ignore_missing: true +on_failure: + - set: + field: error.message + value: '{{{ _ingest.on_failure_message }}}' diff --git a/filebeat/module/system/auth/manifest.yml b/filebeat/module/system/auth/manifest.yml index fefc51a88a4..bf1a3623cf1 100644 --- a/filebeat/module/system/auth/manifest.yml +++ b/filebeat/module/system/auth/manifest.yml @@ -12,16 +12,6 @@ var: os.windows: [] - name: tags default: [] - - name: use_journald - default: false - - name: use_files - default: false - -ingest_pipeline: - - ingest/entrypoint.yml - - ingest/files.yml - - ingest/journald.yml - - ingest/grok-auth-messages.yml - - ingest/common.yml +ingest_pipeline: ingest/pipeline.yml input: config/auth.yml diff --git a/filebeat/module/system/auth/test/debian-12.export b/filebeat/module/system/auth/test/debian-12.export deleted file mode 100644 index 583416f6c7b..00000000000 Binary files a/filebeat/module/system/auth/test/debian-12.export and /dev/null differ diff --git a/filebeat/module/system/auth/test/debian-12.journal-expected.json b/filebeat/module/system/auth/test/debian-12.journal-expected.json deleted file mode 100644 index 2ef69b76b22..00000000000 --- a/filebeat/module/system/auth/test/debian-12.journal-expected.json +++ /dev/null @@ -1,383 +0,0 @@ -[ - { - "event.action": "ssh_login", - "event.category": [ - "authentication", - "session" - ], - "event.dataset": "system.auth", - "event.kind": "event", - "event.module": "system", - "event.outcome": "success", - "event.timezone": "-02:00", - "event.type": [ - "info" - ], - "fileset.name": "auth", - "host.hostname": "vagrant-debian-12", - "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "journald", - "log.syslog.facility.code": 4, - "log.syslog.priority": 6, - "message": "Accepted publickey for vagrant from 10.0.2.2 port 48274 ssh2: ED25519 SHA256:k1kjhwoH/H3w31MbGOIGd7qxrkSQJnoAN0eYJVHDmmI", - "process.args": [ - "\"sshd: vagrant [priv]\"" - ], - "process.args_count": 1, - "process.command_line": "\"sshd: vagrant [priv]\"", - "process.name": "sshd", - "process.pid": 26538, - "related.hosts": [ - "vagrant-debian-12" - ], - "related.ip": [ - "10.0.2.2" - ], - "related.user": [ - "vagrant" - ], - "service.type": "system", - "source.address": "10.0.2.2", - "source.ip": "10.0.2.2", - "source.port": 48274, - "system.auth.ssh.event": "Accepted", - "system.auth.ssh.method": "publickey", - "system.auth.ssh.signature": "ED25519 SHA256:k1kjhwoH/H3w31MbGOIGd7qxrkSQJnoAN0eYJVHDmmI", - "user.group.id": "0", - "user.id": "0", - "user.name": "vagrant" - }, - { - "event.action": "ssh_login", - "event.category": [ - "authentication", - "session" - ], - "event.dataset": "system.auth", - 
"event.kind": "event", - "event.module": "system", - "event.outcome": "success", - "event.timezone": "-02:00", - "event.type": [ - "info" - ], - "fileset.name": "auth", - "host.hostname": "vagrant-debian-12", - "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "journald", - "log.syslog.facility.code": 4, - "log.syslog.priority": 6, - "message": "Accepted password for vagrant from 192.168.42.119 port 55310 ssh2", - "process.args": [ - "\"sshd: vagrant [priv]\"" - ], - "process.args_count": 1, - "process.command_line": "\"sshd: vagrant [priv]\"", - "process.name": "sshd", - "process.pid": 1710, - "related.hosts": [ - "vagrant-debian-12" - ], - "related.ip": [ - "192.168.42.119" - ], - "related.user": [ - "vagrant" - ], - "service.type": "system", - "source.address": "192.168.42.119", - "source.ip": "192.168.42.119", - "source.port": 55310, - "system.auth.ssh.event": "Accepted", - "system.auth.ssh.method": "password", - "user.group.id": "0", - "user.id": "0", - "user.name": "vagrant" - }, - { - "event.action": "ssh_login", - "event.category": [ - "authentication" - ], - "event.dataset": "system.auth", - "event.kind": "event", - "event.module": "system", - "event.outcome": "failure", - "event.timezone": "-02:00", - "event.type": [ - "info" - ], - "fileset.name": "auth", - "host.hostname": "vagrant-debian-12", - "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "journald", - "log.syslog.facility.code": 4, - "log.syslog.priority": 6, - "message": "Invalid user test from 192.168.42.119 port 48890", - "process.args": [ - "\"sshd: unknown [priv]\"" - ], - "process.args_count": 1, - "process.command_line": "\"sshd: unknown [priv]\"", - "process.name": "sshd", - "process.pid": 1721, - "related.hosts": [ - "vagrant-debian-12" - ], - "related.ip": [ - "192.168.42.119" - ], - "related.user": [ - "test" - ], - "service.type": "system", - "source.address": "192.168.42.119", - "source.ip": "192.168.42.119", - "system.auth.ssh.event": "Invalid", - "user.group.id": "0", - "user.id": "0", - "user.name": "test" - }, - { - "event.action": "ssh_login", - "event.category": [ - "authentication" - ], - "event.dataset": "system.auth", - "event.kind": "event", - "event.module": "system", - "event.outcome": "failure", - "event.timezone": "-02:00", - "event.type": [ - "info" - ], - "fileset.name": "auth", - "host.hostname": "vagrant-debian-12", - "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "journald", - "log.syslog.facility.code": 4, - "log.syslog.priority": 6, - "message": "Failed password for root from 192.168.42.119 port 46632 ssh2", - "process.args": [ - "\"sshd: root [priv]\"" - ], - "process.args_count": 1, - "process.command_line": "\"sshd: root [priv]\"", - "process.name": "sshd", - "process.pid": 1723, - "related.hosts": [ - "vagrant-debian-12" - ], - "related.ip": [ - "192.168.42.119" - ], - "related.user": [ - "root" - ], - "service.type": "system", - "source.address": "192.168.42.119", - "source.ip": "192.168.42.119", - "source.port": 46632, - "system.auth.ssh.event": "Failed", - "system.auth.ssh.method": "password", - "user.group.id": "0", - "user.id": "0", - "user.name": "root" - }, - { - "event.action": "ssh_login", - "event.category": [ - "authentication" - ], - "event.dataset": "system.auth", - "event.kind": "event", - "event.module": "system", - "event.outcome": "failure", - "event.timezone": "-02:00", - "event.type": [ - "info" - ], - "fileset.name": "auth", - "host.hostname": "vagrant-debian-12", - "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - 
"input.type": "journald", - "log.syslog.facility.code": 4, - "log.syslog.priority": 6, - "message": "Failed password for root from 192.168.42.119 port 46632 ssh2", - "process.args": [ - "\"sshd: root [priv]\"" - ], - "process.args_count": 1, - "process.command_line": "\"sshd: root [priv]\"", - "process.name": "sshd", - "process.pid": 1723, - "related.hosts": [ - "vagrant-debian-12" - ], - "related.ip": [ - "192.168.42.119" - ], - "related.user": [ - "root" - ], - "service.type": "system", - "source.address": "192.168.42.119", - "source.ip": "192.168.42.119", - "source.port": 46632, - "system.auth.ssh.event": "Failed", - "system.auth.ssh.method": "password", - "user.group.id": "0", - "user.id": "0", - "user.name": "root" - }, - { - "event.action": "ssh_login", - "event.category": [ - "authentication" - ], - "event.dataset": "system.auth", - "event.kind": "event", - "event.module": "system", - "event.outcome": "failure", - "event.timezone": "-02:00", - "event.type": [ - "info" - ], - "fileset.name": "auth", - "host.hostname": "vagrant-debian-12", - "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "journald", - "log.syslog.facility.code": 4, - "log.syslog.priority": 6, - "message": "Failed password for root from 192.168.42.119 port 46632 ssh2", - "process.args": [ - "\"sshd: root [priv]\"" - ], - "process.args_count": 1, - "process.command_line": "\"sshd: root [priv]\"", - "process.name": "sshd", - "process.pid": 1723, - "related.hosts": [ - "vagrant-debian-12" - ], - "related.ip": [ - "192.168.42.119" - ], - "related.user": [ - "root" - ], - "service.type": "system", - "source.address": "192.168.42.119", - "source.ip": "192.168.42.119", - "source.port": 46632, - "system.auth.ssh.event": "Failed", - "system.auth.ssh.method": "password", - "user.group.id": "0", - "user.id": "0", - "user.name": "root" - }, - { - "event.dataset": "system.auth", - "event.kind": "event", - "event.module": "system", - "event.timezone": "-02:00", - "fileset.name": "auth", - "host.hostname": "vagrant-debian-12", - "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "journald", - "log.syslog.facility.code": 10, - "log.syslog.priority": 5, - "message": " vagrant : TTY=pts/2 ; PWD=/home/vagrant ; USER=root ; COMMAND=/usr/bin/emacs /etc/ssh/sshd_config", - "process.args": [ - "sudo", - "emacs", - "/etc/ssh/sshd_config" - ], - "process.args_count": 3, - "process.command_line": "sudo emacs /etc/ssh/sshd_config", - "process.name": "sudo", - "process.pid": 1582, - "related.hosts": [ - "vagrant-debian-12" - ], - "related.user": [ - " vagrant", - "root" - ], - "service.type": "system", - "system.auth.sudo.command": "/usr/bin/emacs /etc/ssh/sshd_config", - "system.auth.sudo.pwd": "/home/vagrant", - "system.auth.sudo.tty": "pts/2", - "system.auth.sudo.user": "root", - "user.effective.name": "root", - "user.group.id": "1000", - "user.id": "1000", - "user.name": " vagrant" - }, - { - "event.category": [ - "iam" - ], - "event.dataset": "system.auth", - "event.kind": "event", - "event.module": "system", - "event.outcome": "success", - "event.timezone": "-02:00", - "event.type": [ - "creation", - "group" - ], - "fileset.name": "auth", - "group.id": "1001", - "group.name": "test", - "host.hostname": "vagrant-debian-12", - "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "journald", - "log.syslog.facility.code": 10, - "log.syslog.priority": 6, - "message": "new group: name=test, GID=1001", - "process.args": [ - "/sbin/groupadd", - "-g", - "1001", - "test" - ], - "process.args_count": 4, - 
"process.command_line": "/sbin/groupadd -g 1001 test", - "process.name": "groupadd", - "process.pid": 1743, - "related.hosts": [ - "vagrant-debian-12" - ], - "service.type": "system", - "user.effective.group.id": "0", - "user.effective.id": "0", - "user.id": "1000" - }, - { - "event.dataset": "system.auth", - "event.kind": "event", - "event.module": "system", - "event.timezone": "-02:00", - "fileset.name": "auth", - "host.hostname": "vagrant-debian-12", - "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "journald", - "log.syslog.facility.code": 4, - "log.syslog.priority": 6, - "message": "Session 8 logged out. Waiting for processes to exit.", - "process.args": [ - "/lib/systemd/systemd-logind" - ], - "process.args_count": 1, - "process.command_line": "/lib/systemd/systemd-logind", - "process.name": "systemd-logind", - "process.pid": 316, - "related.hosts": [ - "vagrant-debian-12" - ], - "service.type": "system", - "user.group.id": "0", - "user.id": "0" - } -] \ No newline at end of file diff --git a/filebeat/module/system/syslog/config/syslog.yml b/filebeat/module/system/syslog/config/syslog.yml index 3bec875d272..e7f238d8af8 100644 --- a/filebeat/module/system/syslog/config/syslog.yml +++ b/filebeat/module/system/syslog/config/syslog.yml @@ -1,44 +1,15 @@ -type: system-logs - -{{ if .use_journald }} -use_journald: true +type: log +paths: +{{ range $i, $path := .paths }} + - {{$path}} {{ end }} - -{{ if .use_files }} -use_files: true -{{ end }} - +exclude_files: [".gz$"] +multiline: + pattern: "^\\s" + match: after processors: - add_locale: ~ - add_fields: target: '' fields: ecs.version: 1.12.0 - -journald: - id: system-syslog - facilities: - - 0 - - 1 - - 2 - - 3 - - 5 - - 6 - - 7 - - 8 - - 9 - - 11 - - 12 - - 15 - -files: - id: system-syslog - paths: - {{ range $i, $path := .paths }} - - {{$path}} - {{ end }} - - exclude_files: [".gz$"] - multiline: - pattern: "^\\s" - match: after diff --git a/filebeat/module/system/syslog/ingest/entrypoint.yml b/filebeat/module/system/syslog/ingest/entrypoint.yml deleted file mode 100644 index e9f3fbc3977..00000000000 --- a/filebeat/module/system/syslog/ingest/entrypoint.yml +++ /dev/null @@ -1,15 +0,0 @@ -description: Entrypoint Pipeline for system/syslog Filebeat module -processors: - - script: - source: | - if(ctx?.journald != null){ - ctx['syslog_pipeline'] = '{< IngestPipeline "journald" >}'; - return; - } - ctx['syslog_pipeline'] = '{< IngestPipeline "files" >}'; - return; - - pipeline: - name: "{{ syslog_pipeline }}" - - remove: - ignore_failure: true - field: "syslog_pipeline" diff --git a/filebeat/module/system/syslog/ingest/journald.yml b/filebeat/module/system/syslog/ingest/journald.yml deleted file mode 100644 index 68400c8f507..00000000000 --- a/filebeat/module/system/syslog/ingest/journald.yml +++ /dev/null @@ -1,32 +0,0 @@ -description: Journald Pipeline for system/syslog Filebeat module -processors: -- set: - field: event.ingested - copy_from: _ingest.timestamp -- set: - field: "process.pid" - value: '{{ journald.pid }}' -- set: - field: event.kind - value: event -- append: - field: related.hosts - value: "{{host.hostname}}" - if: "ctx.host?.hostname != null && ctx.host?.hostname != ''" - allow_duplicates: false -- remove: - description: Remove the extra fields added by the Journald input - ignore_missing: true - field: - - journald - - process.thread - - syslog - - systemd - - message_id -- set: - field: input.type - value: journald -on_failure: -- set: - field: error.message - value: '{{ _ingest.on_failure_message }}' 
diff --git a/filebeat/module/system/syslog/ingest/files.yml b/filebeat/module/system/syslog/ingest/pipeline.yml similarity index 97% rename from filebeat/module/system/syslog/ingest/files.yml rename to filebeat/module/system/syslog/ingest/pipeline.yml index f4f5930e198..b1352f2ad62 100644 --- a/filebeat/module/system/syslog/ingest/files.yml +++ b/filebeat/module/system/syslog/ingest/pipeline.yml @@ -59,9 +59,6 @@ processors: value: "{{host.hostname}}" if: "ctx.host?.hostname != null && ctx.host?.hostname != ''" allow_duplicates: false -- set: - field: input.type - value: log on_failure: - set: field: error.message diff --git a/filebeat/module/system/syslog/manifest.yml b/filebeat/module/system/syslog/manifest.yml index 5112ddc5c15..39a34e56ca3 100644 --- a/filebeat/module/system/syslog/manifest.yml +++ b/filebeat/module/system/syslog/manifest.yml @@ -8,14 +8,6 @@ var: os.darwin: - /var/log/system.log* os.windows: [] - - name: use_journald - default: false - - name: use_files - default: false - -ingest_pipeline: - - ingest/entrypoint.yml - - ingest/files.yml - - ingest/journald.yml +ingest_pipeline: ingest/pipeline.yml input: config/syslog.yml diff --git a/filebeat/module/system/syslog/test/debian-12.export b/filebeat/module/system/syslog/test/debian-12.export deleted file mode 100644 index 780bd46990e..00000000000 Binary files a/filebeat/module/system/syslog/test/debian-12.export and /dev/null differ diff --git a/filebeat/module/system/syslog/test/debian-12.journal-expected.json b/filebeat/module/system/syslog/test/debian-12.journal-expected.json deleted file mode 100644 index 3e9b606be26..00000000000 --- a/filebeat/module/system/syslog/test/debian-12.journal-expected.json +++ /dev/null @@ -1,63 +0,0 @@ -[ - { - "event.dataset": "system.syslog", - "event.kind": "event", - "event.module": "system", - "event.timezone": "-02:00", - "fileset.name": "syslog", - "host.hostname": "vagrant-debian-12", - "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "journald", - "log.syslog.facility.code": 3, - "log.syslog.priority": 6, - "message": "Stopped target getty.target - Login Prompts.", - "process.args": [ - "/sbin/init" - ], - "process.args_count": 1, - "process.command_line": "/sbin/init", - "process.pid": "1", - "related.hosts": [ - "vagrant-debian-12" - ], - "service.type": "system", - "user.group.id": "0", - "user.id": "0" - }, - { - "event.dataset": "system.syslog", - "event.kind": "event", - "event.module": "system", - "event.timezone": "-02:00", - "fileset.name": "syslog", - "host.hostname": "vagrant-debian-12", - "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "journald", - "log.syslog.facility.code": 0, - "log.syslog.priority": 6, - "message": "Console: switching to colour frame buffer device 160x50", - "process.pid": "", - "related.hosts": [ - "vagrant-debian-12" - ], - "service.type": "system" - }, - { - "event.dataset": "system.syslog", - "event.kind": "event", - "event.module": "system", - "event.timezone": "-02:00", - "fileset.name": "syslog", - "host.hostname": "bookworm", - "host.id": "5e6dc8fe417f4ea383e2afaa731f5d8a", - "input.type": "journald", - "log.syslog.facility.code": 0, - "log.syslog.priority": 6, - "message": "thermal_sys: Registered thermal governor 'power_allocator'", - "process.pid": "", - "related.hosts": [ - "bookworm" - ], - "service.type": "system" - } -] \ No newline at end of file diff --git a/filebeat/modules.d/system.yml.disabled b/filebeat/modules.d/system.yml.disabled index 809b32de2ed..1302c6374da 100644 --- 
a/filebeat/modules.d/system.yml.disabled +++ b/filebeat/modules.d/system.yml.disabled @@ -10,16 +10,6 @@ # Filebeat will choose the paths depending on your OS. #var.paths: - # Force using journald to collect system logs - #var.use_journald: true|false - - # Force using log files to collect system logs - #var.use_files: true|false - - # If use_journald and use_files are false, then - # Filebeat will autodetect whether use to journald - # to collect system logs. - # Authorization logs auth: enabled: false @@ -27,20 +17,3 @@ # Set custom paths for the log files. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: - - # Force using journald to collect system logs - #var.use_journald: true|false - - # Force using log files to collect system logs - #var.use_files: true|false - - # If use_journald and use_files are false, then - # Filebeat will autodetect whether use to journald - # to collect system logs. - - # A list of tags to include in events. Including forwarded - # indicates that the events did not originate on this host and - # causes host.name to not be added to events. Include - # preserve_orginal_event causes the pipeline to retain the raw log - # in event.original. Defaults to []. - #var.tags: [] diff --git a/filebeat/tests/integration/journald_test.go b/filebeat/tests/integration/journald_test.go index 447e49b82bb..712d2db4871 100644 --- a/filebeat/tests/integration/journald_test.go +++ b/filebeat/tests/integration/journald_test.go @@ -75,7 +75,7 @@ func generateJournaldLogs(t *testing.T, ctx context.Context, syslogID string, ma //go:embed testdata/filebeat_journald.yml var journaldInputCfg string -func TestJournaldInput(t *testing.T) { +func TestJournaldInputRunsAndRecoversFromJournalctlFailures(t *testing.T) { filebeat := integration.NewBeat( t, "filebeat", @@ -90,9 +90,12 @@ func TestJournaldInput(t *testing.T) { filebeat.WriteConfigFile(yamlCfg) filebeat.Start() + // On a normal execution we run journalctl twice: the first time to read all messages from the + // previous boot until 'now', and the second time with the --follow flag, which should keep on running. + filebeat.WaitForLogs("journalctl started with PID", 10*time.Second, "journalctl did not start") filebeat.WaitForLogs("journalctl started with PID", 10*time.Second, "journalctl did not start") - pidLine := filebeat.GetLogLine("journalctl started with PID") + pidLine := filebeat.GetLastLogLine("journalctl started with PID") logEntry := struct{ Message string }{} if err := json.Unmarshal([]byte(pidLine), &logEntry); err != nil { t.Errorf("could not parse PID log entry as JSON: %s", err) @@ -105,7 +108,7 @@ func TestJournaldInput(t *testing.T) { // Kill journalctl if err := syscall.Kill(pid, syscall.SIGKILL); err != nil { - t.Fatalf("coluld not kill journalctl with PID %d: %s", pid, err) + t.Fatalf("could not kill journalctl with PID %d: %s", pid, err) } go generateJournaldLogs(t, context.Background(), syslogID, 5) diff --git a/filebeat/tests/integration/systemlogs_all_test.go b/filebeat/tests/integration/systemlogs_all_test.go new file mode 100644 index 00000000000..cbc0c50c129 --- /dev/null +++ b/filebeat/tests/integration/systemlogs_all_test.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V.
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package integration + +import ( + "bufio" + _ "embed" + "encoding/json" + "errors" + "io" + "os" + "path/filepath" + "testing" + "time" + + cp "github.com/otiai10/copy" + "github.com/stretchr/testify/require" +) + +//go:embed testdata/filebeat_system_module.yml +var systemModuleCfg string + +func copyModulesDir(t *testing.T, dst string) { + pwd, err := os.Getwd() + if err != nil { + t.Fatalf("cannot get the current directory: %s", err) + } + localModules := filepath.Join(pwd, "../", "../", "module") + localModulesD := filepath.Join(pwd, "../", "../", "modules.d") + + if err := cp.Copy(localModules, filepath.Join(dst, "module")); err != nil { + t.Fatalf("cannot copy 'module' folder to test folder: %s", err) + } + if err := cp.Copy(localModulesD, filepath.Join(dst, "modules.d")); err != nil { + t.Fatalf("cannot copy 'modules.d' folder to test folder: %s", err) + } +} + +//nolint:unused,nolintlint // necessary on Linux +func waitForAllFilesets(t *testing.T, outputGlob string, msgAndArgs ...any) { + require.Eventually( + t, + findFilesetNames(t, outputGlob), + time.Minute, + 10*time.Millisecond, + msgAndArgs...) +} + +//nolint:unused,nolintlint // necessary on Linux +func findFilesetNames(t *testing.T, outputGlob string) func() bool { + f := func() bool { + files, err := filepath.Glob(outputGlob) + if err != nil { + t.Fatalf("cannot get files list for glob '%s': '%s'", outputGlob, err) + } + + if len(files) == 0 { + // The output file may not exist yet; return false so require.Eventually retries. + return false + } + + if len(files) > 1 { + t.Fatalf( + "only a single output file is supported, found: %d. Files: %s", + len(files), + files, + ) + } + + foundSyslog := false + foundAuth := false + + file, err := os.Open(files[0]) + if err != nil { + t.Fatalf("cannot open '%s': '%s'", files[0], err) + } + defer file.Close() + + r := bufio.NewReader(file) + for { + line, err := r.ReadBytes('\n') + if err != nil { + if errors.Is(err, io.EOF) { + break + } else { + t.Fatalf("cannot read '%s': '%s'", file.Name(), err) + } + } + + data := struct { + Fileset struct { + Name string `json:"name"` + } `json:"fileset"` + }{} + + if err := json.Unmarshal(line, &data); err != nil { + t.Fatalf("cannot parse output line as JSON: %s", err) + } + + switch data.Fileset.Name { + case "syslog": + foundSyslog = true + case "auth": + foundAuth = true + } + + if foundAuth && foundSyslog { + return true + } + } + + return false + } + + return f +} diff --git a/filebeat/tests/integration/systemlogs_linux_test.go b/filebeat/tests/integration/systemlogs_linux_test.go new file mode 100644 index 00000000000..88af84734af --- /dev/null +++ b/filebeat/tests/integration/systemlogs_linux_test.go @@ -0,0 +1,67 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build integration && linux + +package integration + +import ( + "fmt" + "path/filepath" + "testing" + "time" + + "github.com/elastic/beats/v7/libbeat/tests/integration" +) + +// TestSystemModuleCanUseJournaldInput aims to ensure the system-logs input can +// correctly choose and start a journald input when the globs defined in +// var.paths do not resolve to any file. +func TestSystemModuleCanUseJournaldInput(t *testing.T) { + t.Skip("The system module is not using the system-logs input at the moment") + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) + workDir := filebeat.TempDir() + copyModulesDir(t, workDir) + + // As the name says, we want this folder to exist but be empty + globWithoutFiles := filepath.Join(filebeat.TempDir(), "this-folder-does-not-exist") + yamlCfg := fmt.Sprintf(systemModuleCfg, globWithoutFiles, globWithoutFiles, workDir) + + filebeat.WriteConfigFile(yamlCfg) + filebeat.Start() + + filebeat.WaitForLogs( + "no files were found, using journald input", + 10*time.Second, + "system-logs did not select journald input") + filebeat.WaitForLogs( + "journalctl started with PID", + 10*time.Second, + "system-logs did not start journald input") + + // Scan every event in the output until at least one from + // each fileset (auth, syslog) is found. + waitForAllFilesets( + t, + filepath.Join(workDir, "output*.ndjson"), + "did not find events from both filesets: 'auth' and 'syslog'", + ) +} diff --git a/filebeat/tests/integration/systemlogs_other_test.go b/filebeat/tests/integration/systemlogs_other_test.go new file mode 100644 index 00000000000..42fc61b426d --- /dev/null +++ b/filebeat/tests/integration/systemlogs_other_test.go @@ -0,0 +1,56 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +//go:build integration + +package integration + +import ( + "fmt" + "path" + "testing" + "time" + + "github.com/elastic/beats/v7/libbeat/tests/integration" +) + +func TestSystemLogsCanUseLogInput(t *testing.T) { + t.Skip("The system module is not using the system-logs input at the moment") + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) + workDir := filebeat.TempDir() + copyModulesDir(t, workDir) + + logFilePath := path.Join(workDir, "syslog") + integration.GenerateLogFile(t, logFilePath, 5, false) + yamlCfg := fmt.Sprintf(systemModuleCfg, logFilePath, logFilePath, workDir) + + filebeat.WriteConfigFile(yamlCfg) + filebeat.Start() + + filebeat.WaitForLogs( + "using log input because file(s) was(were) found", + 10*time.Second, + "system-logs did not select the log input") + filebeat.WaitForLogs( + "Harvester started for paths:", + 10*time.Second, + "system-logs did not start the log input") +} diff --git a/filebeat/tests/integration/testdata/filebeat_system_module.yml b/filebeat/tests/integration/testdata/filebeat_system_module.yml new file mode 100644 index 00000000000..d781aa1590a --- /dev/null +++ b/filebeat/tests/integration/testdata/filebeat_system_module.yml @@ -0,0 +1,29 @@ +filebeat.modules: + - module: system + syslog: + enabled: true + var.paths: + - "%s" + auth: + enabled: true + var.paths: + - "%s" + +path.home: %s + +queue.mem: + flush.timeout: 0 + +output: + file: + path: ${path.home} + filename: "output" + rotate_every_kb: 500000 # 500mb + +logging: + level: debug + selectors: + - input + - input.journald + - input.journald.reader + - input.journald.reader.journalctl-runner diff --git a/filebeat/tests/integration/translate_ldap_attribute_test.go b/filebeat/tests/integration/translate_ldap_attribute_test.go new file mode 100644 index 00000000000..e2b0f877efc --- /dev/null +++ b/filebeat/tests/integration/translate_ldap_attribute_test.go @@ -0,0 +1,216 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build integration + +package integration + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/client" + "github.com/docker/go-connections/nat" + "github.com/go-ldap/ldap/v3" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/libbeat/tests/integration" + "github.com/elastic/elastic-agent-autodiscover/docker" +) + +const translateguidCfg = ` +filebeat.inputs: + - type: filestream + id: "test-translateguidCfg" + paths: + - %s + +queue.mem: + flush.min_events: 1 + flush.timeout: 0.1s + +path.home: %s + +output.file: + path: ${path.home} + filename: "output-file" + +logging: + metrics: + enabled: false + +processors: + - add_fields: + fields: + guid: '%s' + - translate_ldap_attribute: + field: fields.guid + target_field: fields.common_name + ldap_address: 'ldap://localhost:1389' + ldap_base_dn: 'dc=example,dc=org' + ldap_bind_user: 'cn=admin,dc=example,dc=org' + ldap_bind_password: 'adminpassword' + ldap_search_attribute: 'entryUUID' +` + +func TestTranslateGUIDWithLDAP(t *testing.T) { + startOpenldapContainer(t) + + var entryUUID string + require.Eventually(t, func() bool { + var err error + entryUUID, err = getLDAPUserEntryUUID() + return err == nil + }, 10*time.Second, time.Second) + + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) + tempDir := filebeat.TempDir() + + // 1. Generate the log file path + logFilePath := path.Join(tempDir, "log.log") + integration.GenerateLogFile(t, logFilePath, 1, false) + + // 2. Write configuration file and start Filebeat + filebeat.WriteConfigFile( + fmt.Sprintf(translateguidCfg, logFilePath, tempDir, entryUUID), + ) + filebeat.Start() + + var outputFile string + require.Eventually(t, func() bool { + outputFiles, err := filepath.Glob(path.Join(tempDir, "output-file-*.ndjson")) + if err != nil { + return false + } + if len(outputFiles) != 1 { + return false + } + outputFile = outputFiles[0] + return true + }, 10*time.Second, time.Second) + + // 3. 
Wait for the event with the expected translated guid + filebeat.WaitFileContains( + outputFile, + fmt.Sprintf(`"fields":{"guid":"%s","common_name":["User1","user01"]}`, entryUUID), + 10*time.Second, + ) +} + +func startOpenldapContainer(t *testing.T) { + ctx := context.Background() + c, err := docker.NewClient(client.DefaultDockerHost, nil, nil) + if err != nil { + t.Fatal(err) + } + + reader, err := c.ImagePull(ctx, "bitnami/openldap:2", image.PullOptions{}) + if err != nil { + t.Fatal(err) + } + if _, err = io.Copy(os.Stdout, reader); err != nil { + t.Fatal(err) + } + reader.Close() + + resp, err := c.ContainerCreate(ctx, + &container.Config{ + Image: "bitnami/openldap:2", + ExposedPorts: nat.PortSet{ + "1389/tcp": struct{}{}, + }, + Env: []string{ + "LDAP_URI=ldap://openldap:1389", + "LDAP_BASE=dc=example,dc=org", + "LDAP_BIND_DN=cn=admin,dc=example,dc=org", + "LDAP_BIND_PASSWORD=adminpassword", + }, + }, + &container.HostConfig{ + PortBindings: nat.PortMap{ + "1389/tcp": []nat.PortBinding{ + { + HostIP: "0.0.0.0", + HostPort: "1389", + }, + }, + }, + }, nil, nil, "") + if err != nil { + t.Fatal(err) + } + + if err := c.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + defer c.Close() + if err := c.ContainerRemove(ctx, resp.ID, container.RemoveOptions{RemoveVolumes: true, Force: true}); err != nil { + t.Error(err) + } + }) +} + +func getLDAPUserEntryUUID() (string, error) { + // Connect to the LDAP server + l, err := ldap.DialURL("ldap://localhost:1389") + if err != nil { + return "", fmt.Errorf("failed to connect to LDAP server: %w", err) + } + defer l.Close() + + err = l.Bind("cn=admin,dc=example,dc=org", "adminpassword") + if err != nil { + return "", fmt.Errorf("failed to bind to LDAP server: %w", err) + } + + searchRequest := ldap.NewSearchRequest( + "dc=example,dc=org", + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 1, 0, false, + "(cn=User1)", []string{"entryUUID"}, nil, + ) + + sr, err := l.Search(searchRequest) + if err != nil { + return "", fmt.Errorf("failed to execute search: %w", err) + } + + // Process search results + if len(sr.Entries) == 0 { + return "", errors.New("no entries found for the specified username.") + } + entry := sr.Entries[0] + entryUUID := entry.GetAttributeValue("entryUUID") + if entryUUID == "" { + return "", errors.New("entryUUID is empty") + } + return entryUUID, nil +} diff --git a/go.mod b/go.mod index 75aba460236..e3178b6f5d0 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77 // indirect github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 github.com/aws/aws-lambda-go v1.44.0 - github.com/aws/aws-sdk-go-v2 v1.30.4 + github.com/aws/aws-sdk-go-v2 v1.30.5 github.com/aws/aws-sdk-go-v2/config v1.27.29 github.com/aws/aws-sdk-go-v2/credentials v1.17.29 github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.5 @@ -179,6 +179,8 @@ require ( github.com/apache/arrow/go/v14 v14.0.2 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.13 + github.com/aws/aws-sdk-go-v2/service/apigateway v1.25.8 + github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.22.8 github.com/aws/aws-sdk-go-v2/service/cloudformation v1.53.5 github.com/aws/aws-sdk-go-v2/service/health v1.26.4 github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.5 @@ -189,12 +191,13 @@ require ( github.com/elastic/bayeux v1.0.5 github.com/elastic/ebpfevents v0.6.0 github.com/elastic/elastic-agent-autodiscover 
v0.9.0 - github.com/elastic/elastic-agent-libs v0.12.1 + github.com/elastic/elastic-agent-libs v0.17.1 github.com/elastic/elastic-agent-system-metrics v0.11.1 github.com/elastic/go-elasticsearch/v8 v8.14.0 - github.com/elastic/go-quark v0.1.2 + github.com/elastic/go-quark v0.2.0 github.com/elastic/go-sfdc v0.0.0-20241010131323-8e176480d727 github.com/elastic/mito v1.15.0 + github.com/elastic/mock-es v0.0.0-20240712014503-e5b47ece0015 github.com/elastic/tk-btf v0.1.0 github.com/elastic/toutoumomoma v0.0.0-20240626215117-76e39db18dfb github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15 @@ -224,6 +227,7 @@ require ( go.opentelemetry.io/collector/consumer v0.109.0 go.opentelemetry.io/collector/pdata v1.15.0 go.opentelemetry.io/collector/receiver v0.109.0 + golang.org/x/term v0.24.0 google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) @@ -255,8 +259,8 @@ require ( github.com/apache/thrift v0.19.0 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect @@ -339,6 +343,7 @@ require ( github.com/mattn/go-ieproxy v0.0.1 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mileusna/useragent v1.3.4 // indirect github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -382,7 +387,6 @@ require ( go.opentelemetry.io/otel/trace v1.29.0 // indirect go.uber.org/ratelimit v0.3.1 // indirect golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 // indirect - golang.org/x/term v0.24.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 3eadfda4509..93bce761422 100644 --- a/go.sum +++ b/go.sum @@ -159,8 +159,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aws/aws-lambda-go v1.44.0 h1:Xp9PANXKsSJ23IhE4ths592uWTCEewswPhSH9qpAuQQ= github.com/aws/aws-lambda-go v1.44.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8= -github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= +github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g= +github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= github.com/aws/aws-sdk-go-v2/config v1.27.29 
h1:+ZPKb3u9Up4KZWLGTtpTmC5T3XmRD1ZQ8XQjRCHUvJw= @@ -171,14 +171,18 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.13 h1:X8EeaOjl91c8sP14NG8EHx5ZxXLJg0tHDp+KQSghp28= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.13/go.mod h1:kEI/h2bETfm09LSd7xEEH2qcU1cd//+5HH4Le7p9JgY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16 h1:mimdLQkIX1zr8GIPY1ZtALdBQGxcASiBd2MOp8m/dMc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16/go.mod h1:YHk6owoSwrIsok+cAH9PENCOGoH5PU2EllX4vLtSrsY= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.25.8 h1:CgEyY7gfTf7lHYcCi7+w6jJ1XQBugjpadtsuN3TGxdQ= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.25.8/go.mod h1:z99ur4Ha5540t8hb5XtqV/UMOnEoEZK22lhr5ZBS0zw= +github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.22.8 h1:SWBNBbVbThg5Hdi3hWbVaDFjV/OyPbuqZLu4N+mj/Es= +github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.22.8/go.mod h1:lz2IT8gzzSwao0Pa6uMSdCIPsprmgCkW83q6sHGZFDw= github.com/aws/aws-sdk-go-v2/service/cloudformation v1.53.5 h1:YeTVIy7cJLeahs7K0jQGDGAd1YYND/to/z8N3kqZBhY= github.com/aws/aws-sdk-go-v2/service/cloudformation v1.53.5/go.mod h1:y45SdA9v+dLlweaqwAQMoFeXqdRvgwevafa2X8iTqZQ= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.5 h1:/YvqO1j75i4leoV+Z3a5s/dAlEszf2wTKBW8jc3Gd4s= @@ -338,8 +342,8 @@ github.com/elastic/elastic-agent-autodiscover v0.9.0 h1:+iWIKh0u3e8I+CJa3FfWe9h0 github.com/elastic/elastic-agent-autodiscover v0.9.0/go.mod h1:5iUxLHhVdaGSWYTveSwfJEY4RqPXTG13LPiFoxcpFd4= github.com/elastic/elastic-agent-client/v7 v7.15.0 h1:nDB7v8TBoNuD6IIzC3z7Q0y+7bMgXoT2DsHfolO2CHE= github.com/elastic/elastic-agent-client/v7 v7.15.0/go.mod h1:6h+f9QdIr3GO2ODC0Y8+aEXRwzbA5W4eV4dd/67z7nI= -github.com/elastic/elastic-agent-libs v0.12.1 h1:5jkxMx15Bna8cq7/Sz/XUIVUXfNWiJ80iSk4ICQ7KJ0= -github.com/elastic/elastic-agent-libs v0.12.1/go.mod h1:5CR02awPrBr+tfmjBBK+JI+dMmHNQjpVY24J0wjbC7M= +github.com/elastic/elastic-agent-libs v0.17.1 h1:1MXoc1eHGE8hCdVJ9+qiGiZAGeHzT2QBVVzD/oxwqeU= +github.com/elastic/elastic-agent-libs v0.17.1/go.mod h1:5CR02awPrBr+tfmjBBK+JI+dMmHNQjpVY24J0wjbC7M= github.com/elastic/elastic-agent-system-metrics v0.11.1 h1:BxViQHnqxvvi/65rj3mGwG6Eto6ldFCTnuDTUJnakaU= github.com/elastic/elastic-agent-system-metrics v0.11.1/go.mod 
h1:3QiMu9wTKJFvpCN+5klgGqasTMNKJbgY3xcoN1KQXJk= github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= @@ -363,8 +367,8 @@ github.com/elastic/go-lumber v0.1.2-0.20220819171948-335fde24ea0f h1:TsPpU5EAwlt github.com/elastic/go-lumber v0.1.2-0.20220819171948-335fde24ea0f/go.mod h1:HHaWnZamYKWsR9/eZNHqRHob8iQDKnchHmmskT/SKko= github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595 h1:q8n4QjcLa4q39Q3fqHRknTBXBtegjriHFrB42YKgXGI= github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595/go.mod h1:s09U1b4P1ZxnKx2OsqY7KlHdCesqZWIhyq0Gs/QC/Us= -github.com/elastic/go-quark v0.1.2 h1:Hnov9q8D9ofS976SODWWYAZ23IpgPILxTUCiccmhw0c= -github.com/elastic/go-quark v0.1.2/go.mod h1:/ngqgumD/Z5vnFZ4XPN2kCbxnEfG5/Uc+bRvOBabVVA= +github.com/elastic/go-quark v0.2.0 h1:r2BL4NzvhESrrL/yA3AcHt8mwF7fvQDssBAUiOL1sdg= +github.com/elastic/go-quark v0.2.0/go.mod h1:/ngqgumD/Z5vnFZ4XPN2kCbxnEfG5/Uc+bRvOBabVVA= github.com/elastic/go-seccomp-bpf v1.4.0 h1:6y3lYrEHrLH9QzUgOiK8WDqmPaMnnB785WxibCNIOH4= github.com/elastic/go-seccomp-bpf v1.4.0/go.mod h1:wIMxjTbKpWGQk4CV9WltlG6haB4brjSH/dvAohBPM1I= github.com/elastic/go-sfdc v0.0.0-20241010131323-8e176480d727 h1:yuiN60oaQUz2PtNpNhDI2H6zrCdfiiptmNdwV5WUaKA= @@ -383,6 +387,8 @@ github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/u github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/mito v1.15.0 h1:MicOxLSVkgU2Aonbh3i+++66Wl5wvD8y9gALK8PQDYs= github.com/elastic/mito v1.15.0/go.mod h1:J+wCf4HccW2YoSFmZMGu+d06gN+WmnIlj5ehBqine74= +github.com/elastic/mock-es v0.0.0-20240712014503-e5b47ece0015 h1:z8cC8GASpPo8yKlbnXI36HQ/BM9wYjhBPNbDjAWm0VU= +github.com/elastic/mock-es v0.0.0-20240712014503-e5b47ece0015/go.mod h1:qH9DX/Dmflz6EAtaks/+2SsdQzecVAKE174Zl66hk7E= github.com/elastic/pkcs8 v1.0.0 h1:HhitlUKxhN288kcNcYkjW6/ouvuwJWd9ioxpjnD9jVA= github.com/elastic/pkcs8 v1.0.0/go.mod h1:ipsZToJfq1MxclVTwpG7U/bgeDtf+0HkUiOxebk95+0= github.com/elastic/sarama v1.19.1-0.20220310193331-ebc2b0d8eef3 h1:FzA0/n4iMt8ojGDGRoiFPSHFvvdVIvxOxyLtiFnrLBM= @@ -703,6 +709,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= +github.com/mileusna/useragent v1.3.4 h1:MiuRRuvGjEie1+yZHO88UBYg8YBC/ddF6T7F56i3PCk= +github.com/mileusna/useragent v1.3.4/go.mod h1:3d8TOmwL/5I8pJjyVDteHtgDGcefrFUX4ccGOMKNYYc= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= @@ -945,6 +953,8 @@ go.opentelemetry.io/collector/pdata v1.15.0 h1:q/T1sFpRKJnjDrUsHdJ6mq4uSqViR/f92 go.opentelemetry.io/collector/pdata v1.15.0/go.mod h1:2wcsTIiLAJSbqBq/XUUYbi+cP+N87d0jEJzmb9nT19U= go.opentelemetry.io/collector/pdata/pprofile v0.109.0 h1:5lobQKeHk8p4WC7KYbzL6ZqqX3eSizsdmp5vM8pQFBs= go.opentelemetry.io/collector/pdata/pprofile v0.109.0/go.mod h1:lXIifCdtR5ewO17JAYTUsclMqRp6h6dCowoXHhGyw8Y= +go.opentelemetry.io/collector/pdata/testdata v0.109.0 h1:gvIqy6juvqFET/6zi+zUOH1KZY/vtEDZW55u7gJ/hEo= +go.opentelemetry.io/collector/pdata/testdata v0.109.0/go.mod 
h1:zRttU/F5QMQ6ZXBMXCoSVG3EORTZLTK+UUS0VoMoT44= go.opentelemetry.io/collector/receiver v0.109.0 h1:DTOM7xaDl7FUGQIjvjmWZn03JUE+aG4mJzWWfb7S8zw= go.opentelemetry.io/collector/receiver v0.109.0/go.mod h1:jeiCHaf3PE6aXoZfHF5Uexg7aztu+Vkn9LVw0YDKm6g= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= diff --git a/heartbeat/beater/heartbeat.go b/heartbeat/beater/heartbeat.go index 9a849f6bc7e..227b375ee90 100644 --- a/heartbeat/beater/heartbeat.go +++ b/heartbeat/beater/heartbeat.go @@ -88,7 +88,7 @@ func New(b *beat.Beat, rawConfig *conf.C) (beat.Beater, error) { if b.Config.Output.Name() == "elasticsearch" && !b.Manager.Enabled() { // Connect to ES and setup the State loader if the output is not managed by agent // Note this, intentionally, blocks until connected or max attempts reached - esClient, err := makeESClient(b.Config.Output.Config(), 3, 2*time.Second) + esClient, err := makeESClient(context.TODO(), b.Config.Output.Config(), 3, 2*time.Second) if err != nil { if parsedConfig.RunOnce { trace.Abort() @@ -275,7 +275,7 @@ func (bt *Heartbeat) RunCentralMgmtMonitors(b *beat.Beat) { } // Backoff panics with 0 duration, set to smallest unit - esClient, err := makeESClient(outCfg.Config(), 1, 1*time.Nanosecond) + esClient, err := makeESClient(context.TODO(), outCfg.Config(), 1, 1*time.Nanosecond) if err != nil { logp.L().Warnf("skipping monitor state management during managed reload: %w", err) } else { @@ -324,7 +324,7 @@ func (bt *Heartbeat) Stop() { } // makeESClient establishes an ES connection meant to load monitors' state -func makeESClient(cfg *conf.C, attempts int, wait time.Duration) (*eslegclient.Connection, error) { +func makeESClient(ctx context.Context, cfg *conf.C, attempts int, wait time.Duration) (*eslegclient.Connection, error) { var ( esClient *eslegclient.Connection err error @@ -353,7 +353,7 @@ func makeESClient(cfg *conf.C, attempts int, wait time.Duration) (*eslegclient.C } for i := 0; i < attempts; i++ { - esClient, err = eslegclient.NewConnectedClient(newCfg, "Heartbeat") + esClient, err = eslegclient.NewConnectedClient(ctx, newCfg, "Heartbeat") if err == nil { connectDelay.Reset() return esClient, nil diff --git a/heartbeat/beater/heartbeat_test.go b/heartbeat/beater/heartbeat_test.go index 669811dc4c8..279366a0e7e 100644 --- a/heartbeat/beater/heartbeat_test.go +++ b/heartbeat/beater/heartbeat_test.go @@ -18,6 +18,7 @@ package beater import ( + "context" "testing" "time" @@ -39,7 +40,7 @@ func TestMakeESClient(t *testing.T) { anyAttempt := 1 anyDuration := 1 * time.Second - _, _ = makeESClient(origCfg, anyAttempt, anyDuration) + _, _ = makeESClient(context.Background(), origCfg, anyAttempt, anyDuration) timeout, err := origCfg.Int("timeout", -1) require.NoError(t, err) diff --git a/heartbeat/cmd/root.go b/heartbeat/cmd/root.go index fd95013c6fe..a1f80a7cda7 100644 --- a/heartbeat/cmd/root.go +++ b/heartbeat/cmd/root.go @@ -22,6 +22,7 @@ import ( "github.com/elastic/beats/v7/heartbeat/beater" "github.com/elastic/beats/v7/heartbeat/include" + "github.com/elastic/beats/v7/libbeat/cfgfile" cmd "github.com/elastic/beats/v7/libbeat/cmd" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/ecs" @@ -81,6 +82,7 @@ func Initialize(settings instance.Settings) *cmd.BeatsRootCmd { ` setup.ResetFlags() setup.Flags().Bool(cmd.IndexManagementKey, false, "Setup all components related to Elasticsearch index management, including template, ilm policy and rollover alias") + 
cfgfile.AddAllowedBackwardsCompatibleFlag(cmd.IndexManagementKey) return rootCmd } diff --git a/heartbeat/main_test.go b/heartbeat/main_test.go index a806e1588bd..18cc332b63a 100644 --- a/heartbeat/main_test.go +++ b/heartbeat/main_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/elastic/beats/v7/heartbeat/cmd" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" ) @@ -33,11 +34,14 @@ func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. func TestSystem(_ *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/heartbeat/monitors/wrappers/monitorstate/testutil.go b/heartbeat/monitors/wrappers/monitorstate/testutil.go index 28a6c260655..be58dcdb924 100644 --- a/heartbeat/monitors/wrappers/monitorstate/testutil.go +++ b/heartbeat/monitors/wrappers/monitorstate/testutil.go @@ -18,6 +18,7 @@ package monitorstate import ( + "context" "encoding/json" "testing" @@ -50,7 +51,9 @@ func IntegES(t *testing.T) (esc *eslegclient.Connection) { conn.Encoder = eslegclient.NewJSONEncoder(nil, false) - err = conn.Connect() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + err = conn.Connect(ctx) if err != nil { t.Fatal(err) panic(err) // panic in case TestLogger did not stop test diff --git a/libbeat/cfgfile/cfgfile.go b/libbeat/cfgfile/cfgfile.go index ca19af8cb9f..14e38c5ab7d 100644 --- a/libbeat/cfgfile/cfgfile.go +++ b/libbeat/cfgfile/cfgfile.go @@ -18,9 +18,12 @@ package cfgfile import ( + "flag" "fmt" "os" "path/filepath" + "strings" + "sync" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/fleetmode" @@ -28,39 +31,72 @@ import ( "github.com/elastic/elastic-agent-libs/logp" ) -// Command line flags. +// Evil package level globals var ( - // The default config cannot include the beat name as it is not initialized - // when this variable is created. See ChangeDefaultCfgfileFlag which should - // be called prior to flags.Parse(). - configfiles = config.StringArrFlag(nil, "c", "beat.yml", "Configuration file, relative to path.config") - overwrites = config.SettingFlag(nil, "E", "Configuration overwrite") - - // Additional default settings, that must be available for variable expansion - defaults = config.MustNewConfigFrom(map[string]interface{}{ - "path": map[string]interface{}{ - "home": ".", // to be initialized by beat - "config": "${path.home}", - "data": fmt.Sprint("${path.home}", string(os.PathSeparator), "data"), - "logs": fmt.Sprint("${path.home}", string(os.PathSeparator), "logs"), - }, + once sync.Once + configfiles *config.StringsFlag + overwrites *config.C + defaults *config.C + homePath *string + configPath *string + allowedBackwardsCompatibleFlags []string +) + +func Initialize() { + once.Do(func() { + // The default config cannot include the beat name as + // it is not initialized when this variable is + // created. See ChangeDefaultCfgfileFlag which should + // be called prior to flags.Parse(). 
+ configfiles = config.StringArrFlag(nil, "c", "beat.yml", "Configuration file, relative to path.config")
+ AddAllowedBackwardsCompatibleFlag("c")
+ overwrites = config.SettingFlag(nil, "E", "Configuration overwrite")
+ AddAllowedBackwardsCompatibleFlag("E")
+ defaults = config.MustNewConfigFrom(map[string]interface{}{
+ "path": map[string]interface{}{
+ "home": ".", // to be initialized by beat
+ "config": "${path.home}",
+ "data": filepath.Join("${path.home}", "data"),
+ "logs": filepath.Join("${path.home}", "logs"),
+ },
+ })
+ homePath = config.ConfigOverwriteFlag(nil, overwrites, "path.home", "path.home", "", "Home path")
+ AddAllowedBackwardsCompatibleFlag("path.home")
+ configPath = config.ConfigOverwriteFlag(nil, overwrites, "path.config", "path.config", "", "Configuration path")
+ AddAllowedBackwardsCompatibleFlag("path.config")
+ _ = config.ConfigOverwriteFlag(nil, overwrites, "path.data", "path.data", "", "Data path")
+ AddAllowedBackwardsCompatibleFlag("path.data")
+ _ = config.ConfigOverwriteFlag(nil, overwrites, "path.logs", "path.logs", "", "Logs path")
+ AddAllowedBackwardsCompatibleFlag("path.logs")
 })
+}
- // home-path CLI flag (initialized in init)
- homePath *string
- configPath *string
-)
+func isAllowedBackwardsCompatibleFlag(f string) bool {
+ for _, existing := range allowedBackwardsCompatibleFlags {
+ if existing == f {
+ return true
+ }
+ }
+ return false
+}
-func init() {
- // add '-path.x' options overwriting paths in 'overwrites' config
- makePathFlag := func(name, usage string) *string {
- return config.ConfigOverwriteFlag(nil, overwrites, name, name, "", usage)
+func AddAllowedBackwardsCompatibleFlag(f string) {
+ if isAllowedBackwardsCompatibleFlag(f) {
+ return
 }
+ allowedBackwardsCompatibleFlags = append(allowedBackwardsCompatibleFlags, f)
+}
- homePath = makePathFlag("path.home", "Home path")
- configPath = makePathFlag("path.config", "Configuration path")
- makePathFlag("path.data", "Data path")
- makePathFlag("path.logs", "Logs path")
+func ConvertFlagsForBackwardsCompatibility() {
+ // backwards compatibility workaround, convert -flags to --flags:
+ for i, arg := range os.Args[1:] {
+ if strings.HasPrefix(arg, "-") && !strings.HasPrefix(arg, "--") {
+ candidate, _, _ := strings.Cut(strings.TrimPrefix(arg, "-"), "=")
+ if isAllowedBackwardsCompatibleFlag(candidate) {
+ os.Args[1+i] = "-" + arg
+ }
+ }
+ }
 }
 // OverrideChecker checks if a config should be overwritten. @@ -73,9 +109,11 @@ type ConditionalOverride struct { Config *config.C }
-// ChangeDefaultCfgfileFlag replaces the value and default value for the `-c`
-// flag so that it reflects the beat name.
+// ChangeDefaultCfgfileFlag replaces the value and default value for
+// the `-c` flag so that it reflects the beat name. It will call
+// Initialize() to register the `-c` flag.
 func ChangeDefaultCfgfileFlag(beatName string) error {
+ Initialize()
 configfiles.SetDefault(beatName + ".yml")
 return nil
 }
 @@ -96,8 +134,12 @@ func GetDefaultCfgfile() string { return cfg }
-// HandleFlags adapts default config settings based on command line flags.
+// HandleFlags adapts default config settings based on command line
+// flags. It also stores whether -E management.enabled=true was set on
+// the command line, to determine whether the Beat is running under
+// Agent. It will call Initialize() to register flags like `-E`.
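A minimal, self-contained sketch of what ConvertFlagsForBackwardsCompatibility (defined above) does to the argument list, assuming the same allowlist semantics; the `allowed` map and `convert` helper are illustrative names, not part of the change:

package main

import (
	"fmt"
	"strings"
)

// allowed stands in for the package's allowlist; illustrative only.
var allowed = map[string]bool{"e": true, "E": true, "path.home": true}

// convert rewrites single-dash occurrences of allowlisted flags into
// double-dash form, mirroring ConvertFlagsForBackwardsCompatibility.
func convert(args []string) []string {
	out := append([]string(nil), args...)
	for i, arg := range out {
		if strings.HasPrefix(arg, "-") && !strings.HasPrefix(arg, "--") {
			name, _, _ := strings.Cut(strings.TrimPrefix(arg, "-"), "=")
			if allowed[name] {
				out[i] = "-" + arg
			}
		}
	}
	return out
}

func main() {
	// -e and -path.home are allowlisted and gain a second dash;
	// -unknown is left untouched for the flag package to reject.
	fmt.Println(convert([]string{"-e", "-path.home=/tmp", "-unknown"}))
	// Output: [--e --path.home=/tmp -unknown]
}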
func HandleFlags() error { + Initialize() // default for the home path is the binary location home, err := filepath.Abs(filepath.Dir(os.Args[0])) if err != nil { @@ -114,6 +156,27 @@ func HandleFlags() error { common.PrintConfigDebugf(overwrites, "CLI setting overwrites (-E flag):") } + // Enable check to see if beat is running under Agent + // This is stored in a package so the modules which don't have + // access to the config can check this value. + type management struct { + Enabled bool `config:"management.enabled"` + } + var managementSettings management + cfgFlag := flag.Lookup("E") + if cfgFlag == nil { + fleetmode.SetAgentMode(false) + return nil + } + cfgObject, _ := cfgFlag.Value.(*config.SettingsFlag) + cliCfg := cfgObject.Config() + + err = cliCfg.Unpack(&managementSettings) + if err != nil { + fleetmode.SetAgentMode(false) + return nil //nolint:nilerr // unpacking failing isn't an error for this case + } + fleetmode.SetAgentMode(managementSettings.Enabled) return nil } @@ -220,8 +283,11 @@ func SetConfigPath(path string) { *configPath = path } -// GetPathConfig returns ${path.config}. If ${path.config} is not set, ${path.home} is returned. +// GetPathConfig returns ${path.config}. If ${path.config} is not set, +// ${path.home} is returned. It will call Initialize to ensure that +// `path.config` and `path.home` are set. func GetPathConfig() string { + Initialize() if *configPath != "" { return *configPath } else if *homePath != "" { diff --git a/libbeat/cmd/export/dashboard.go b/libbeat/cmd/export/dashboard.go index 7b878e00788..4a4e13167f8 100644 --- a/libbeat/cmd/export/dashboard.go +++ b/libbeat/cmd/export/dashboard.go @@ -22,6 +22,7 @@ import ( "github.com/spf13/cobra" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/dashboards" "github.com/elastic/beats/v7/libbeat/version" @@ -101,8 +102,11 @@ func GenDashboardCmd(settings instance.Settings) *cobra.Command { } genTemplateConfigCmd.Flags().String("id", "", "Dashboard id") + cfgfile.AddAllowedBackwardsCompatibleFlag("id") genTemplateConfigCmd.Flags().String("yml", "", "Yaml file containing list of dashboard ID and filename pairs") + cfgfile.AddAllowedBackwardsCompatibleFlag("yml") genTemplateConfigCmd.Flags().String("folder", "", "Target folder to save exported assets") + cfgfile.AddAllowedBackwardsCompatibleFlag("folder") return genTemplateConfigCmd } diff --git a/libbeat/cmd/export/ilm_policy.go b/libbeat/cmd/export/ilm_policy.go index 60c97920fd7..557c62c8aae 100644 --- a/libbeat/cmd/export/ilm_policy.go +++ b/libbeat/cmd/export/ilm_policy.go @@ -20,6 +20,7 @@ package export import ( "github.com/spf13/cobra" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/idxmgmt" "github.com/elastic/beats/v7/libbeat/idxmgmt/lifecycle" @@ -57,7 +58,9 @@ func GenGetILMPolicyCmd(settings instance.Settings) *cobra.Command { } genTemplateConfigCmd.Flags().String("es.version", settings.Version, "Elasticsearch version") + cfgfile.AddAllowedBackwardsCompatibleFlag("es.version") genTemplateConfigCmd.Flags().String("dir", "", "Specify directory for printing policy files. 
By default policies are printed to stdout.") + cfgfile.AddAllowedBackwardsCompatibleFlag("dir") return genTemplateConfigCmd } diff --git a/libbeat/cmd/export/index_pattern.go b/libbeat/cmd/export/index_pattern.go index 6b8b8c6839f..863fc7a72cf 100644 --- a/libbeat/cmd/export/index_pattern.go +++ b/libbeat/cmd/export/index_pattern.go @@ -22,6 +22,7 @@ import ( "github.com/spf13/cobra" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/kibana" libversion "github.com/elastic/elastic-agent-libs/version" @@ -67,6 +68,7 @@ func GenIndexPatternConfigCmd(settings instance.Settings) *cobra.Command { } genTemplateConfigCmd.Flags().String("es.version", settings.Version, "Elasticsearch version") + cfgfile.AddAllowedBackwardsCompatibleFlag("es.version") return genTemplateConfigCmd } diff --git a/libbeat/cmd/export/template.go b/libbeat/cmd/export/template.go index ffd957961ef..45a83b98681 100644 --- a/libbeat/cmd/export/template.go +++ b/libbeat/cmd/export/template.go @@ -20,6 +20,7 @@ package export import ( "github.com/spf13/cobra" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/idxmgmt" "github.com/elastic/beats/v7/libbeat/idxmgmt/lifecycle" @@ -59,8 +60,11 @@ func GenTemplateConfigCmd(settings instance.Settings) *cobra.Command { } genTemplateConfigCmd.Flags().String("es.version", settings.Version, "Elasticsearch version") + cfgfile.AddAllowedBackwardsCompatibleFlag("es.version") genTemplateConfigCmd.Flags().Bool("noilm", false, "Generate template with ILM disabled") + cfgfile.AddAllowedBackwardsCompatibleFlag("noilm") genTemplateConfigCmd.Flags().String("dir", "", "Specify directory for printing template files. By default templates are printed to stdout.") + cfgfile.AddAllowedBackwardsCompatibleFlag("dir") return genTemplateConfigCmd } diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index 1a6250fad4d..6332ebac39b 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -898,7 +898,9 @@ func (b *Beat) Setup(settings Settings, bt beat.Creator, setup SetupSettings) er if !isElasticsearchOutput(outCfg.Name()) { return fmt.Errorf("index management requested but the Elasticsearch output is not configured/enabled") } - esClient, err := eslegclient.NewConnectedClient(outCfg.Config(), b.Info.Beat) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + esClient, err := eslegclient.NewConnectedClient(ctx, outCfg.Config(), b.Info.Beat) if err != nil { return err } @@ -974,9 +976,11 @@ func (b *Beat) Setup(settings Settings, bt beat.Creator, setup SetupSettings) er }()) } -// handleFlags parses the command line flags. It invokes the HandleFlags -// callback if implemented by the Beat. +// handleFlags converts -flag to --flags, parses the command line +// flags, and it invokes the HandleFlags callback if implemented by +// the Beat. 
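The export commands above all follow the same two-step pattern: define the cobra flag, then allowlist it so the legacy single-dash spelling keeps working. A hedged sketch of that recurring pattern (command and flag names illustrative, not from the change itself):

// registerExportFlags shows the define-then-allowlist idiom used above.
func registerExportFlags(cmd *cobra.Command) {
	// Define the flag, then allowlist it so a legacy single-dash
	// spelling (-dir) is rewritten to --dir before parsing.
	cmd.Flags().String("dir", "", "Directory to write generated files to")
	cfgfile.AddAllowedBackwardsCompatibleFlag("dir")
}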
func (b *Beat) handleFlags() error { + cfgfile.ConvertFlagsForBackwardsCompatibility() flag.Parse() return cfgfile.HandleFlags() } diff --git a/libbeat/cmd/instance/beat_integration_test.go b/libbeat/cmd/instance/beat_integration_test.go index baf7657665d..1bca1400de6 100644 --- a/libbeat/cmd/instance/beat_integration_test.go +++ b/libbeat/cmd/instance/beat_integration_test.go @@ -27,6 +27,7 @@ import ( "time" "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/mock" "github.com/elastic/elastic-agent-libs/config" @@ -92,7 +93,9 @@ func TestMonitoringNameFromConfig(t *testing.T) { defer wg.Done() // Set the configuration file path flag so the beat can read it + cfgfile.Initialize() _ = flag.Set("c", "testdata/mockbeat.yml") + cfgfile.AddAllowedBackwardsCompatibleFlag("c") _ = instance.Run(mock.Settings, func(_ *beat.Beat, _ *config.C) (beat.Beater, error) { return &mockBeat, nil }) diff --git a/libbeat/cmd/instance/imports_common.go b/libbeat/cmd/instance/imports_common.go index be4174c0ea0..eb33bc27fe3 100644 --- a/libbeat/cmd/instance/imports_common.go +++ b/libbeat/cmd/instance/imports_common.go @@ -43,6 +43,7 @@ import ( _ "github.com/elastic/beats/v7/libbeat/processors/registered_domain" _ "github.com/elastic/beats/v7/libbeat/processors/script" _ "github.com/elastic/beats/v7/libbeat/processors/syslog" + _ "github.com/elastic/beats/v7/libbeat/processors/translate_ldap_attribute" _ "github.com/elastic/beats/v7/libbeat/processors/translate_sid" _ "github.com/elastic/beats/v7/libbeat/processors/urldecode" _ "github.com/elastic/beats/v7/libbeat/publisher/includes" // Register publisher pipeline modules diff --git a/libbeat/cmd/keystore.go b/libbeat/cmd/keystore.go index 7f5ecd78c41..cfc39202069 100644 --- a/libbeat/cmd/keystore.go +++ b/libbeat/cmd/keystore.go @@ -21,14 +21,15 @@ import ( "bufio" "errors" "fmt" - "io/ioutil" + "io" "os" "strings" "syscall" "github.com/spf13/cobra" - tml "golang.org/x/crypto/ssh/terminal" + "golang.org/x/term" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/common/cli" "github.com/elastic/beats/v7/libbeat/common/terminal" @@ -38,7 +39,7 @@ import ( func getKeystore(settings instance.Settings) (keystore.Keystore, error) { b, err := instance.NewInitializedBeat(settings) if err != nil { - return nil, fmt.Errorf("error initializing beat: %s", err) + return nil, fmt.Errorf("error initializing beat: %w", err) } return b.Keystore(), nil @@ -74,6 +75,7 @@ func genCreateKeystoreCmd(settings instance.Settings) *cobra.Command { }), } command.Flags().BoolVar(&flagForce, "force", false, "override the existing keystore") + cfgfile.AddAllowedBackwardsCompatibleFlag("force") return command } @@ -92,7 +94,9 @@ func genAddKeystoreCmd(settings instance.Settings) *cobra.Command { }), } command.Flags().BoolVar(&flagStdin, "stdin", false, "Use the stdin as the source of the secret") + cfgfile.AddAllowedBackwardsCompatibleFlag("stdin") command.Flags().BoolVar(&flagForce, "force", false, "Override the existing key") + cfgfile.AddAllowedBackwardsCompatibleFlag("force") return command } @@ -132,27 +136,27 @@ func createKeystore(settings instance.Settings, force bool) error { writableKeystore, err := keystore.AsWritableKeystore(store) if err != nil { - return fmt.Errorf("error creating the keystore: %s", err) + return fmt.Errorf("error creating the keystore: %w", err) } - 
if store.IsPersisted() == true && force == false { + if store.IsPersisted() && !force { response := terminal.PromptYesNo("A keystore already exists, Overwrite?", false) - if response == true { + if response { err := writableKeystore.Create(true) if err != nil { - return fmt.Errorf("error creating the keystore: %s", err) + return fmt.Errorf("error creating the keystore: %w", err) } } else { - fmt.Println("Exiting without creating keystore.") + fmt.Printf("Exiting without creating %s keystore.", settings.Name) //nolint:forbidigo //needs refactor return nil } } else { err := writableKeystore.Create(true) if err != nil { - return fmt.Errorf("Error creating the keystore: %s", err) + return fmt.Errorf("Error creating the keystore: %w", err) } } - fmt.Printf("Created %s keystore\n", settings.Name) + fmt.Printf("Created %s keystore\n", settings.Name) //nolint:forbidigo //needs refactor return nil } @@ -167,32 +171,32 @@ func addKey(store keystore.Keystore, keys []string, force, stdin bool) error { writableKeystore, err := keystore.AsWritableKeystore(store) if err != nil { - return fmt.Errorf("error creating the keystore: %s", err) + return fmt.Errorf("error creating the keystore: %w", err) } - if store.IsPersisted() == false { - if force == false { + if !store.IsPersisted() { + if !force { answer := terminal.PromptYesNo("The keystore does not exist. Do you want to create it?", false) - if answer == false { + if !answer { return errors.New("exiting without creating keystore") } } err := writableKeystore.Create(true) if err != nil { - return fmt.Errorf("could not create keystore, error: %s", err) + return fmt.Errorf("could not create keystore, error: %w", err) } - fmt.Println("Created keystore") + fmt.Println("Created keystore") //nolint:forbidigo //needs refactor } key := strings.TrimSpace(keys[0]) - value, err := store.Retrieve(key) - if value != nil && force == false { - if stdin == true { + value, _ := store.Retrieve(key) + if value != nil && !force { + if stdin { return fmt.Errorf("the settings %s already exist in the keystore use `--force` to replace it", key) } answer := terminal.PromptYesNo(fmt.Sprintf("Setting %s already exists, Overwrite?", key), false) - if answer == false { - fmt.Println("Exiting without modifying keystore.") + if !answer { + fmt.Println("Exiting without modifying keystore.") //nolint:forbidigo //needs refactor return nil } } @@ -200,25 +204,25 @@ func addKey(store keystore.Keystore, keys []string, force, stdin bool) error { var keyValue []byte if stdin { reader := bufio.NewReader(os.Stdin) - keyValue, err = ioutil.ReadAll(reader) + keyValue, err = io.ReadAll(reader) if err != nil { return fmt.Errorf("could not read input from stdin") } } else { - fmt.Printf("Enter value for %s: ", key) - keyValue, err = tml.ReadPassword(int(syscall.Stdin)) - fmt.Println() + fmt.Printf("Enter value for %s: ", key) //nolint:forbidigo //needs refactor + keyValue, err = term.ReadPassword(int(syscall.Stdin)) //nolint:unconvert,nolintlint //necessary on Windows + fmt.Println() //nolint:forbidigo //needs refactor if err != nil { - return fmt.Errorf("could not read value from the input, error: %s", err) + return fmt.Errorf("could not read value from the input, error: %w", err) } } if err = writableKeystore.Store(key, keyValue); err != nil { - return fmt.Errorf("could not add the key in the keystore, error: %s", err) + return fmt.Errorf("could not add the key in the keystore, error: %w", err) } if err = writableKeystore.Save(); err != nil { - return fmt.Errorf("fail to save the keystore: %s", 
err) + return fmt.Errorf("fail to save the keystore: %w", err) } else { - fmt.Println("Successfully updated the keystore") + fmt.Println("Successfully updated the keystore") //nolint:forbidigo //needs refactor } return nil } @@ -230,10 +234,10 @@ func removeKey(store keystore.Keystore, keys []string) error { writableKeystore, err := keystore.AsWritableKeystore(store) if err != nil { - return fmt.Errorf("error deleting the keystore: %s", err) + return fmt.Errorf("error deleting the keystore: %w", err) } - if store.IsPersisted() == false { + if !store.IsPersisted() { return errors.New("the keystore doesn't exist. Use the 'create' command to create one") } @@ -244,12 +248,12 @@ func removeKey(store keystore.Keystore, keys []string) error { return fmt.Errorf("could not find key '%v' in the keystore", key) } - writableKeystore.Delete(key) + _ = writableKeystore.Delete(key) err = writableKeystore.Save() if err != nil { - return fmt.Errorf("could not update the keystore with the changes, key: %s, error: %v", key, err) + return fmt.Errorf("could not update the keystore with the changes, key: %s, error: %w", key, err) } - fmt.Printf("successfully removed key: %s\n", key) + fmt.Printf("successfully removed key: %s\n", key) //nolint:forbidigo //needs refactor } return nil } @@ -257,14 +261,14 @@ func removeKey(store keystore.Keystore, keys []string) error { func list(store keystore.Keystore) error { listingKeystore, err := keystore.AsListingKeystore(store) if err != nil { - return fmt.Errorf("error listing the keystore: %s", err) + return fmt.Errorf("error listing the keystore: %w", err) } keys, err := listingKeystore.List() if err != nil { - return fmt.Errorf("could not read values from the keystore, error: %s", err) + return fmt.Errorf("could not read values from the keystore, error: %w", err) } for _, key := range keys { - fmt.Println(key) + fmt.Println(key) //nolint:forbidigo //needs refactor } return nil } diff --git a/libbeat/cmd/root.go b/libbeat/cmd/root.go index 589d706fc61..cbe2f7f8f6e 100644 --- a/libbeat/cmd/root.go +++ b/libbeat/cmd/root.go @@ -21,7 +21,6 @@ import ( "flag" "fmt" "os" - "strings" "github.com/spf13/cobra" @@ -33,15 +32,6 @@ import ( "github.com/elastic/beats/v7/libbeat/outputs/elasticsearch" ) -func init() { - // backwards compatibility workaround, convert -flags to --flags: - for i, arg := range os.Args[1:] { - if strings.HasPrefix(arg, "-") && !strings.HasPrefix(arg, "--") && len(arg) > 2 { - os.Args[1+i] = "-" + arg - } - } -} - // BeatsRootCmd handles all application command line interface, parses user // flags and runs subcommands type BeatsRootCmd struct { @@ -76,6 +66,7 @@ func GenRootCmdWithSettings(beatCreator beat.Creator, settings instance.Settings rootCmd.Use = settings.Name // Due to a dependence upon the beat name, the default config file path + cfgfile.Initialize() err := cfgfile.ChangeDefaultCfgfileFlag(settings.Name) if err != nil { panic(fmt.Errorf("failed to set default config file path: %w", err)) @@ -96,18 +87,30 @@ func GenRootCmdWithSettings(beatCreator beat.Creator, settings instance.Settings // Persistent flags, common across all subcommands rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("E")) + cfgfile.AddAllowedBackwardsCompatibleFlag("E") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("c")) + cfgfile.AddAllowedBackwardsCompatibleFlag("c") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("d")) + cfgfile.AddAllowedBackwardsCompatibleFlag("d") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("v")) + 
cfgfile.AddAllowedBackwardsCompatibleFlag("v") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("e")) + cfgfile.AddAllowedBackwardsCompatibleFlag("e") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("environment")) + cfgfile.AddAllowedBackwardsCompatibleFlag("environment") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("path.config")) + cfgfile.AddAllowedBackwardsCompatibleFlag("path.config") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("path.data")) + cfgfile.AddAllowedBackwardsCompatibleFlag("path.data") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("path.logs")) + cfgfile.AddAllowedBackwardsCompatibleFlag("path.logs") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("path.home")) + cfgfile.AddAllowedBackwardsCompatibleFlag("path.home") rootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("strict.perms")) + cfgfile.AddAllowedBackwardsCompatibleFlag("strict.perms") if f := flag.CommandLine.Lookup("plugin"); f != nil { rootCmd.PersistentFlags().AddGoFlag(f) + cfgfile.AddAllowedBackwardsCompatibleFlag("plugin") } // Inherit root flags from run command diff --git a/libbeat/cmd/run.go b/libbeat/cmd/run.go index b078aadaf89..d6cadd318ce 100644 --- a/libbeat/cmd/run.go +++ b/libbeat/cmd/run.go @@ -24,6 +24,7 @@ import ( "github.com/spf13/cobra" "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" ) @@ -42,9 +43,13 @@ func genRunCmd(settings instance.Settings, beatCreator beat.Creator) *cobra.Comm // Run subcommand flags, only available to *beat run runCmd.Flags().AddGoFlag(flag.CommandLine.Lookup("N")) + cfgfile.AddAllowedBackwardsCompatibleFlag("N") runCmd.Flags().AddGoFlag(flag.CommandLine.Lookup("httpprof")) + cfgfile.AddAllowedBackwardsCompatibleFlag("httpprof") runCmd.Flags().AddGoFlag(flag.CommandLine.Lookup("cpuprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("cpuprofile") runCmd.Flags().AddGoFlag(flag.CommandLine.Lookup("memprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("memprofile") if settings.RunFlags != nil { runCmd.Flags().AddFlagSet(settings.RunFlags) diff --git a/libbeat/cmd/setup.go b/libbeat/cmd/setup.go index 64d1f41fdea..0b28d22f96f 100644 --- a/libbeat/cmd/setup.go +++ b/libbeat/cmd/setup.go @@ -24,6 +24,7 @@ import ( "github.com/spf13/cobra" "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd/instance" ) @@ -111,11 +112,16 @@ func genSetupCmd(settings instance.Settings, beatCreator beat.Creator) *cobra.Co } setup.Flags().Bool(DashboardKey, false, "Setup dashboards") + cfgfile.AddAllowedBackwardsCompatibleFlag(DashboardKey) setup.Flags().Bool(PipelineKey, false, "Setup Ingest pipelines") + cfgfile.AddAllowedBackwardsCompatibleFlag(PipelineKey) setup.Flags().Bool(IndexManagementKey, false, "Setup all components related to Elasticsearch index management, including template, ilm policy and rollover alias") + cfgfile.AddAllowedBackwardsCompatibleFlag(IndexManagementKey) setup.Flags().Bool("enable-all-filesets", false, "Behave as if all modules and filesets had been enabled") + cfgfile.AddAllowedBackwardsCompatibleFlag("enable-all-filesets") setup.Flags().Bool("force-enable-module-filesets", false, "Behave as if all filesets, within enabled modules, are enabled") + cfgfile.AddAllowedBackwardsCompatibleFlag("force-enable-module-filesets") return &setup } diff --git a/libbeat/common/fleetmode/fleet_mode.go 
b/libbeat/common/fleetmode/fleet_mode.go index af179b887ea..97a17804f64 100644 --- a/libbeat/common/fleetmode/fleet_mode.go +++ b/libbeat/common/fleetmode/fleet_mode.go @@ -17,33 +17,18 @@ package fleetmode -import ( - "flag" - - "github.com/elastic/elastic-agent-libs/config" -) +var managementEnabled bool + +// SetAgentMode stores if the Beat is running under Elastic Agent. +// Normally this is called when the command line flags are parsed. +// This is stored as a package level variable because some components +// (like filebeat/metricbeat modules) don't have access to the +// configuration information to determine this on their own. +func SetAgentMode(enabled bool) { + managementEnabled = enabled +} -// Enabled checks to see if filebeat/metricbeat is running under Agent -// The management setting is stored in the main Beat runtime object, but we can't see that from a module -// So instead we check the CLI flags, since Agent starts filebeat/metricbeat with "-E", "management.enabled=true" +// Enabled returns true if the Beat is running under Elastic Agent. func Enabled() bool { - type management struct { - Enabled bool `config:"management.enabled"` - } - var managementSettings management - - cfgFlag := flag.Lookup("E") - if cfgFlag == nil { - return false - } - - cfgObject, _ := cfgFlag.Value.(*config.SettingsFlag) - cliCfg := cfgObject.Config() - - err := cliCfg.Unpack(&managementSettings) - if err != nil { - return false - } - - return managementSettings.Enabled + return managementEnabled } diff --git a/libbeat/docs/processors-list.asciidoc b/libbeat/docs/processors-list.asciidoc index 4105666049d..341875f9f96 100644 --- a/libbeat/docs/processors-list.asciidoc +++ b/libbeat/docs/processors-list.asciidoc @@ -131,6 +131,9 @@ endif::[] ifndef::no_timestamp_processor[] * <> endif::[] +ifndef::no_translate_ldap_attribute_processor[] +* <> +endif::[] ifndef::no_translate_sid_processor[] * <> endif::[] @@ -279,6 +282,9 @@ endif::[] ifndef::no_timestamp_processor[] include::{libbeat-processors-dir}/timestamp/docs/timestamp.asciidoc[] endif::[] +ifndef::no_translate_ldap_attribute_processor[] +include::{libbeat-processors-dir}/translate_ldap_attribute/docs/translate_ldap_attribute.asciidoc[] +endif::[] ifndef::no_translate_sid_processor[] include::{libbeat-processors-dir}/translate_sid/docs/translate_sid.asciidoc[] endif::[] diff --git a/libbeat/esleg/eslegclient/api_mock_test.go b/libbeat/esleg/eslegclient/api_mock_test.go index 97834dcda51..231ee437800 100644 --- a/libbeat/esleg/eslegclient/api_mock_test.go +++ b/libbeat/esleg/eslegclient/api_mock_test.go @@ -20,6 +20,7 @@ package eslegclient import ( + "context" "encoding/json" "fmt" "net/http" @@ -63,14 +64,14 @@ func TestOneHostSuccessResp(t *testing.T) { server := ElasticsearchMock(200, expectedResp) - client := newTestConnection(server.URL) + client := newTestConnection(t, server.URL) params := map[string]string{ "refresh": "true", } _, resp, err := client.Index(index, "test", "1", params, body) if err != nil { - t.Errorf("Index() returns error: %s", err) + t.Fatalf("Index() returns error: %s", err) } if !resp.Created { t.Errorf("Index() fails: %s", resp) @@ -89,8 +90,10 @@ func TestOneHost500Resp(t *testing.T) { server := ElasticsearchMock(http.StatusInternalServerError, []byte("Something wrong happened")) - client := newTestConnection(server.URL) - err := client.Connect() + client := newTestConnection(t, server.URL) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + err := client.Connect(ctx) if err != nil { 
t.Fatalf("Failed to connect: %v", err) } @@ -121,7 +124,7 @@ func TestOneHost503Resp(t *testing.T) { server := ElasticsearchMock(503, []byte("Something wrong happened")) - client := newTestConnection(server.URL) + client := newTestConnection(t, server.URL) params := map[string]string{ "refresh": "true", diff --git a/libbeat/esleg/eslegclient/api_test.go b/libbeat/esleg/eslegclient/api_test.go index 6c7dd675ccf..0bd0f5341b5 100644 --- a/libbeat/esleg/eslegclient/api_test.go +++ b/libbeat/esleg/eslegclient/api_test.go @@ -19,6 +19,7 @@ package eslegclient import ( + "context" "encoding/json" "testing" @@ -170,11 +171,20 @@ func TestReadSearchResult_invalid(t *testing.T) { assert.Error(t, err) } -func newTestConnection(url string) *Connection { +// newTestConnection creates a new connection for testing +// +//nolint:unused // it's used by files with the !integration constraint +func newTestConnection(t *testing.T, url string) *Connection { conn, _ := NewConnection(ConnectionSettings{ URL: url, }) conn.Encoder = NewJSONEncoder(nil, false) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + if err := conn.Connect(ctx); err != nil { + t.Fatalf("cannot connect to Elasticsearch: %s", err) + } + return conn } diff --git a/libbeat/esleg/eslegclient/bulkapi_mock_test.go b/libbeat/esleg/eslegclient/bulkapi_mock_test.go index 96434819eca..598204386f9 100644 --- a/libbeat/esleg/eslegclient/bulkapi_mock_test.go +++ b/libbeat/esleg/eslegclient/bulkapi_mock_test.go @@ -60,7 +60,7 @@ func TestOneHostSuccessResp_Bulk(t *testing.T) { server := ElasticsearchMock(200, expectedResp) - client := newTestConnection(server.URL) + client := newTestConnection(t, server.URL) params := map[string]string{ "refresh": "true", @@ -95,7 +95,7 @@ func TestOneHost500Resp_Bulk(t *testing.T) { server := ElasticsearchMock(http.StatusInternalServerError, []byte("Something wrong happened")) - client := newTestConnection(server.URL) + client := newTestConnection(t, server.URL) params := map[string]string{ "refresh": "true", @@ -134,7 +134,7 @@ func TestOneHost503Resp_Bulk(t *testing.T) { server := ElasticsearchMock(503, []byte("Something wrong happened")) - client := newTestConnection(server.URL) + client := newTestConnection(t, server.URL) params := map[string]string{ "refresh": "true", diff --git a/libbeat/esleg/eslegclient/connection.go b/libbeat/esleg/eslegclient/connection.go index 6a22132080f..310aa853e34 100644 --- a/libbeat/esleg/eslegclient/connection.go +++ b/libbeat/esleg/eslegclient/connection.go @@ -67,7 +67,6 @@ type Connection struct { // requests will share the same cancellable context // so they can be aborted on Close() reqsContext context.Context - cancelReqs func() } // ConnectionSettings are the settings needed for a Connection @@ -82,7 +81,7 @@ type ConnectionSettings struct { Kerberos *kerberos.Config - OnConnectCallback func() error + OnConnectCallback func(*Connection) error Observer transport.IOStatser Parameters map[string]string @@ -109,7 +108,7 @@ type ESVersionData struct { BuildFlavor string `json:"build_flavor"` } -// NewConnection returns a new Elasticsearch client +// NewConnection returns a new Elasticsearch client. 
func NewConnection(s ConnectionSettings) (*Connection, error) { logger := logp.NewLogger("esclientleg") @@ -184,15 +183,12 @@ func NewConnection(s ConnectionSettings) (*Connection, error) { logger.Info("kerberos client created") } - ctx, cancelFunc := context.WithCancel(context.Background()) conn := Connection{ ConnectionSettings: s, HTTP: esClient, Encoder: encoder, log: logger, responseBuffer: bytes.NewBuffer(nil), - reqsContext: ctx, - cancelReqs: cancelFunc, } if s.APIKey != "" { @@ -255,7 +251,7 @@ func NewClients(cfg *cfg.C, beatname string) ([]Connection, error) { } // NewConnectedClient returns a non-thread-safe connection. Make sure for each goroutine you initialize a new connection. -func NewConnectedClient(cfg *cfg.C, beatname string) (*Connection, error) { +func NewConnectedClient(ctx context.Context, cfg *cfg.C, beatname string) (*Connection, error) { clients, err := NewClients(cfg, beatname) if err != nil { return nil, err @@ -264,7 +260,7 @@ func NewConnectedClient(cfg *cfg.C, beatname string) (*Connection, error) { errors := []string{} for _, client := range clients { - err = client.Connect() + err = client.Connect(ctx) if err != nil { const errMsg = "error connecting to Elasticsearch at %v: %v" client.log.Errorf(errMsg, client.URL, err) @@ -279,17 +275,22 @@ func NewConnectedClient(cfg *cfg.C, beatname string) (*Connection, error) { // Connect connects the client. It runs a GET request against the root URL of // the configured host, updates the known Elasticsearch version and calls -// globally configured handlers. -func (conn *Connection) Connect() error { +// globally configured handlers. The context is used to control the lifecycle +// of the HTTP requests/connections, the caller is responsible for cancelling +// the context to stop any in-flight requests. +func (conn *Connection) Connect(ctx context.Context) error { if conn.log == nil { conn.log = logp.NewLogger("esclientleg") } + + conn.reqsContext = ctx + if err := conn.getVersion(); err != nil { return err } if conn.OnConnectCallback != nil { - if err := conn.OnConnectCallback(); err != nil { + if err := conn.OnConnectCallback(conn); err != nil { return fmt.Errorf("Connection marked as failed because the onConnect callback failed: %w", err) } } @@ -323,7 +324,7 @@ func (conn *Connection) Ping() (ESPingData, error) { return response, nil } -// Close closes a connection. +// Close closes any idle connections from the HTTP client. 
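Read together, the Connect and Close doc comments define the new lifecycle: the caller-supplied context bounds in-flight requests, while Close merely releases idle keep-alives, so the same connection can be re-connected later (the integration test further down exercises exactly this). A minimal sketch under those assumptions:

// useConnection demonstrates the caller-owned lifecycle; helper name
// is hypothetical.
func useConnection(conn *eslegclient.Connection) error {
	ctx, cancel := context.WithCancel(context.Background())
	if err := conn.Connect(ctx); err != nil {
		return err
	}
	// ... issue requests ...
	cancel()            // aborts anything still in flight
	return conn.Close() // drops idle keep-alives; Connect may be called again
}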
func (conn *Connection) Close() error { conn.HTTP.CloseIdleConnections() return nil @@ -358,7 +359,9 @@ func (conn *Connection) Test(d testing.Driver) { }) } - err = conn.Connect() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = conn.Connect(ctx) d.Fatal("talk to server", err) version := conn.GetVersion() d.Info("version", version.String()) diff --git a/libbeat/esleg/eslegclient/connection_integration_test.go b/libbeat/esleg/eslegclient/connection_integration_test.go index b4e277ed1a6..b56360b4232 100644 --- a/libbeat/esleg/eslegclient/connection_integration_test.go +++ b/libbeat/esleg/eslegclient/connection_integration_test.go @@ -21,8 +21,7 @@ package eslegclient import ( "context" - "io/ioutil" - "math/rand" + "io" "net" "net/http" "net/http/httptest" @@ -34,17 +33,25 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/esleg/eslegtest" - "github.com/elastic/beats/v7/libbeat/outputs" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/transport/httpcommon" ) func TestConnect(t *testing.T) { conn := getTestingElasticsearch(t) - err := conn.Connect() + err := conn.Connect(context.Background()) assert.NoError(t, err) } +func TestConnectionCanBeClosedAndReused(t *testing.T) { + conn := getTestingElasticsearch(t) + ctx, cancel := context.WithCancel(context.Background()) + assert.NoError(t, conn.Connect(ctx), "first connect must succeed") + assert.NoError(t, conn.Close(), "close must succeed") + cancel() + assert.NoError(t, conn.Connect(context.Background()), "calling connect after close must succeed") +} + func TestConnectWithProxy(t *testing.T) { wrongPort, err := net.Listen("tcp", "localhost:0") require.NoError(t, err) @@ -66,7 +73,9 @@ func TestConnectWithProxy(t *testing.T) { "timeout": 5, // seconds }) require.NoError(t, err) - assert.Error(t, client.Connect(), "it should fail without proxy") + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + assert.Error(t, client.Connect(ctx), "it should fail without proxy") client, err = connectTestEs(t, map[string]interface{}{ "hosts": "http://" + wrongPort.Addr().String(), @@ -74,7 +83,7 @@ func TestConnectWithProxy(t *testing.T) { "timeout": 5, // seconds }) require.NoError(t, err) - assert.NoError(t, client.Connect()) + assert.NoError(t, client.Connect(ctx)) } func connectTestEs(t *testing.T, cfg interface{}) (*Connection, error) { @@ -139,16 +148,6 @@ func getTestingElasticsearch(t eslegtest.TestLogger) *Connection { return conn } -func randomClient(grp outputs.Group) outputs.NetworkClient { - L := len(grp.Clients) - if L == 0 { - panic("no elasticsearch client") - } - - client := grp.Clients[rand.Intn(L)] - return client.(outputs.NetworkClient) -} - // startTestProxy starts a proxy that redirects all connections to the specified URL func startTestProxy(t *testing.T, redirectURL string) *httptest.Server { t.Helper() @@ -166,14 +165,14 @@ func startTestProxy(t *testing.T, redirectURL string) *httptest.Server { require.NoError(t, err) defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) require.NoError(t, err) for _, header := range []string{"Content-Encoding", "Content-Type"} { w.Header().Set(header, resp.Header.Get(header)) } w.WriteHeader(resp.StatusCode) - w.Write(body) + w.Write(body) //nolint: errcheck // It's a test, we can ignore this error })) return proxy } diff --git a/libbeat/esleg/eslegclient/connection_test.go 
b/libbeat/esleg/eslegclient/connection_test.go index 19fe67e9f55..77cbcdda674 100644 --- a/libbeat/esleg/eslegclient/connection_test.go +++ b/libbeat/esleg/eslegclient/connection_test.go @@ -162,7 +162,9 @@ func TestUserAgentHeader(t *testing.T) { testCase.connSettings.URL = server.URL conn, err := NewConnection(testCase.connSettings) require.NoError(t, err) - require.NoError(t, conn.Connect(), "conn.Connect must not return an error") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.NoError(t, conn.Connect(ctx), "conn.Connect must not return an error") }) } } diff --git a/libbeat/esleg/eslegtest/util.go b/libbeat/esleg/eslegtest/util.go index 28f33fde2dc..e86ca14363d 100644 --- a/libbeat/esleg/eslegtest/util.go +++ b/libbeat/esleg/eslegtest/util.go @@ -18,6 +18,7 @@ package eslegtest import ( + "context" "fmt" "os" ) @@ -32,20 +33,23 @@ const ( // TestLogger is used to report fatal errors to the testing framework. type TestLogger interface { Fatal(args ...interface{}) + Cleanup(f func()) } // Connectable defines the minimum interface required to initialize a connected // client. type Connectable interface { - Connect() error + Connect(context.Context) error } // InitConnection initializes a new connection if the no error value from creating the // connection instance is reported. // The test logger will be used if an error is found. func InitConnection(t TestLogger, conn Connectable, err error) { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) if err == nil { - err = conn.Connect() + err = conn.Connect(ctx) } if err != nil { diff --git a/libbeat/idxmgmt/lifecycle/client_handler_integration_test.go b/libbeat/idxmgmt/lifecycle/client_handler_integration_test.go index 67b9a1cfb06..6f81bf98a02 100644 --- a/libbeat/idxmgmt/lifecycle/client_handler_integration_test.go +++ b/libbeat/idxmgmt/lifecycle/client_handler_integration_test.go @@ -20,6 +20,7 @@ package lifecycle import ( + "context" "fmt" "os" "testing" @@ -141,7 +142,9 @@ func newRawESClient(t *testing.T) ESClient { t.Fatal(err) } - if err := client.Connect(); err != nil { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + if err := client.Connect(ctx); err != nil { t.Fatalf("Failed to connect to Test Elasticsearch instance: %v", err) } diff --git a/libbeat/libbeat_test.go b/libbeat/libbeat_test.go index e4bac5e309d..1f9e1b3ffe8 100644 --- a/libbeat/libbeat_test.go +++ b/libbeat/libbeat_test.go @@ -21,6 +21,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" ) @@ -31,11 +32,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/libbeat/licenser/elastic_fetcher.go b/libbeat/licenser/elastic_fetcher.go index bcbe68a938f..1f869d61fef 100644 --- a/libbeat/licenser/elastic_fetcher.go +++ b/libbeat/licenser/elastic_fetcher.go @@ -18,10 +18,10 @@ package licenser import ( + "context" "encoding/json" "errors" "fmt" - "math/rand" "net/http" 
"github.com/elastic/beats/v7/libbeat/esleg/eslegclient" @@ -98,6 +98,7 @@ func (f *ElasticFetcher) parseJSON(b []byte) (License, error) { // esClientMux is taking care of round robin request over an array of elasticsearch client, note that // calling request is not threadsafe. +// nolint: unused // it's used on Linux type esClientMux struct { clients []eslegclient.Connection idx int @@ -107,6 +108,7 @@ type esClientMux struct { // at the end of the function call, if an error occur we return the error and will pick up the next client on the // next call. Not that we just round robin between hosts, any backoff strategy should be handled by // the consumer of this type. +// nolint: unused // it's used on Linux func (mux *esClientMux) Request( method, path string, pipeline string, @@ -115,7 +117,9 @@ func (mux *esClientMux) Request( ) (int, []byte, error) { c := mux.clients[mux.idx] - if err := c.Connect(); err != nil { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if err := c.Connect(ctx); err != nil { return 0, nil, err } defer c.Close() @@ -127,19 +131,3 @@ func (mux *esClientMux) Request( } return status, response, err } - -// newESClientMux takes a list of clients and randomize where we start and the list of host we are -// querying. -func newESClientMux(clients []eslegclient.Connection) *esClientMux { - // randomize where we start - idx := rand.Intn(len(clients)) - - // randomize the list of round robin hosts. - tmp := make([]eslegclient.Connection, len(clients)) - copy(tmp, clients) - rand.Shuffle(len(tmp), func(i, j int) { - tmp[i], tmp[j] = tmp[j], tmp[i] - }) - - return &esClientMux{idx: idx, clients: tmp} -} diff --git a/libbeat/licenser/elastic_fetcher_integration_test.go b/libbeat/licenser/elastic_fetcher_integration_test.go index f303bfe0d8c..7560ebb394d 100644 --- a/libbeat/licenser/elastic_fetcher_integration_test.go +++ b/libbeat/licenser/elastic_fetcher_integration_test.go @@ -20,6 +20,7 @@ package licenser import ( + "context" "testing" "time" @@ -35,7 +36,7 @@ const ( elasticsearchPort = "9200" ) -func getTestClient() *eslegclient.Connection { +func getTestClient(t *testing.T) *eslegclient.Connection { transport := httpcommon.DefaultHTTPTransportSettings() transport.Timeout = 60 * time.Second @@ -47,16 +48,22 @@ func getTestClient() *eslegclient.Connection { CompressionLevel: 3, Transport: transport, }) - if err != nil { - panic(err) + t.Fatalf("cannot get new ES connection: %s", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + if err := client.Connect(ctx); err != nil { + t.Fatalf("cannot connect to ES: %s", err) } + return client } // Sanity check for schema change on the HTTP response from a live Elasticsearch instance. 
func TestElasticsearch(t *testing.T) { - f := NewElasticFetcher(getTestClient()) + f := NewElasticFetcher(getTestClient(t)) license, err := f.Fetch() if !assert.NoError(t, err) { return diff --git a/libbeat/licenser/elastic_fetcher_test.go b/libbeat/licenser/elastic_fetcher_test.go index 731bf5c0618..82ca7e47ca2 100644 --- a/libbeat/licenser/elastic_fetcher_test.go +++ b/libbeat/licenser/elastic_fetcher_test.go @@ -18,7 +18,8 @@ package licenser import ( - "io/ioutil" + "context" + "fmt" "net/http" "net/http/httptest" "os" @@ -26,15 +27,41 @@ import ( "testing" "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" + "github.com/elastic/beats/v7/libbeat/version" "github.com/stretchr/testify/assert" ) +func esRootHandler(w http.ResponseWriter, r *http.Request) { + respStr := fmt.Sprintf(` +{ + "name" : "582a64c35c16", + "cluster_name" : "docker-cluster", + "cluster_uuid" : "fnanWPBeSNS9KZ930Z5JmA", + "version" : { + "number" : "%s", + "build_flavor" : "default", + "build_type" : "docker", + "build_hash" : "14b7170921f2f0e4109255b83cb9af175385d87f", + "build_date" : "2024-08-23T00:26:58.284513650Z", + "build_snapshot" : true, + "lucene_version" : "9.11.1", + "minimum_wire_compatibility_version" : "7.17.0", + "minimum_index_compatibility_version" : "7.0.0" + }, + "tagline" : "You Know, for Search" +}`, version.GetDefaultVersion()) + + w.Write([]byte(respStr)) +} + func newServerClientPair(t *testing.T, handler http.HandlerFunc) (*httptest.Server, *eslegclient.Connection) { mux := http.NewServeMux() - mux.Handle("/_license/", http.HandlerFunc(handler)) + mux.Handle("/", http.HandlerFunc(esRootHandler)) + mux.Handle("/_license/", handler) server := httptest.NewServer(mux) + t.Cleanup(server.Close) client, err := eslegclient.NewConnection(eslegclient.ConnectionSettings{ URL: server.URL, @@ -43,13 +70,19 @@ func newServerClientPair(t *testing.T, handler http.HandlerFunc) (*httptest.Serv t.Fatalf("could not create the elasticsearch client, error: %s", err) } + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + if err := client.Connect(ctx); err != nil { + t.Fatalf("cannot connect to ES: %s", err) + } + return server, client } func TestParseJSON(t *testing.T) { t.Run("OSS release of Elasticsearch (Code: 405)", func(t *testing.T) { h := func(w http.ResponseWriter, r *http.Request) { - http.Error(w, "Method Not Allowed", 405) + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) } s, c := newServerClientPair(t, h) defer s.Close() @@ -75,7 +108,7 @@ func TestParseJSON(t *testing.T) { t.Run("malformed JSON", func(t *testing.T) { h := func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("hello bad JSON")) + _, _ = w.Write([]byte("hello bad JSON")) } s, c := newServerClientPair(t, h) defer s.Close() @@ -88,7 +121,7 @@ func TestParseJSON(t *testing.T) { t.Run("401 response", func(t *testing.T) { h := func(w http.ResponseWriter, r *http.Request) { - http.Error(w, "Unauthorized", 401) + http.Error(w, "Unauthorized", http.StatusUnauthorized) } s, c := newServerClientPair(t, h) defer s.Close() @@ -113,14 +146,14 @@ func TestParseJSON(t *testing.T) { }) t.Run("200 response", func(t *testing.T) { - filepath.Walk("testdata/", func(path string, i os.FileInfo, err error) error { + _ = filepath.Walk("testdata/", func(path string, i os.FileInfo, err error) error { if i.IsDir() { return nil } t.Run(path, func(t *testing.T) { h := func(w http.ResponseWriter, r *http.Request) { - json, err := ioutil.ReadFile(path) + json, err := os.ReadFile(path) if err != nil { 
t.Fatal("could not read JSON") } diff --git a/libbeat/monitoring/report/elasticsearch/client.go b/libbeat/monitoring/report/elasticsearch/client.go index 56f56ac8e1e..28be1c37917 100644 --- a/libbeat/monitoring/report/elasticsearch/client.go +++ b/libbeat/monitoring/report/elasticsearch/client.go @@ -59,10 +59,10 @@ func newPublishClient( return p, nil } -func (c *publishClient) Connect() error { +func (c *publishClient) Connect(ctx context.Context) error { c.log.Debug("Monitoring client: connect.") - err := c.es.Connect() + err := c.es.Connect(ctx) if err != nil { return fmt.Errorf("cannot connect underlying Elasticsearch client: %w", err) } diff --git a/libbeat/monitoring/report/elasticsearch/elasticsearch.go b/libbeat/monitoring/report/elasticsearch/elasticsearch.go index da3f6135110..61e051d1222 100644 --- a/libbeat/monitoring/report/elasticsearch/elasticsearch.go +++ b/libbeat/monitoring/report/elasticsearch/elasticsearch.go @@ -18,6 +18,7 @@ package elasticsearch import ( + "context" "errors" "io" "math/rand" @@ -214,8 +215,10 @@ func (r *reporter) initLoop(c config) { for { // Select one configured endpoint by random and check if xpack is available + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() client := r.out[rand.Intn(len(r.out))] - err := client.Connect() + err := client.Connect(ctx) if err == nil { closing(log, client) break diff --git a/libbeat/outputs/backoff.go b/libbeat/outputs/backoff.go index 3c7f8e51e10..87d94bb66d0 100644 --- a/libbeat/outputs/backoff.go +++ b/libbeat/outputs/backoff.go @@ -45,8 +45,8 @@ func WithBackoff(client NetworkClient, init, max time.Duration) NetworkClient { } } -func (b *backoffClient) Connect() error { - err := b.client.Connect() +func (b *backoffClient) Connect(ctx context.Context) error { + err := b.client.Connect(ctx) backoff.WaitOnError(b.backoff, err) return err } diff --git a/libbeat/outputs/elasticsearch/client.go b/libbeat/outputs/elasticsearch/client.go index 70c4cc1cce5..56f28cdbf30 100644 --- a/libbeat/outputs/elasticsearch/client.go +++ b/libbeat/outputs/elasticsearch/client.go @@ -133,7 +133,7 @@ func NewClient( return nil, err } - conn.OnConnectCallback = func() error { + conn.OnConnectCallback = func(conn *eslegclient.Connection) error { globalCallbackRegistry.mutex.Lock() defer globalCallbackRegistry.mutex.Unlock() @@ -532,8 +532,8 @@ func (client *Client) applyItemStatus( return true } -func (client *Client) Connect() error { - return client.conn.Connect() +func (client *Client) Connect(ctx context.Context) error { + return client.conn.Connect(ctx) } func (client *Client) Close() error { diff --git a/libbeat/outputs/elasticsearch/client_integration_test.go b/libbeat/outputs/elasticsearch/client_integration_test.go index 765fd3eec5a..f4fb0e4f9a9 100644 --- a/libbeat/outputs/elasticsearch/client_integration_test.go +++ b/libbeat/outputs/elasticsearch/client_integration_test.go @@ -429,8 +429,12 @@ func connectTestEs(t *testing.T, cfg interface{}, stats outputs.Observer) (outpu } client := randomClient(output).(clientWrap).Client().(*Client) - // Load version number - _ = client.Connect() + // Load version ctx + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + if err := client.Connect(ctx); err != nil { + t.Fatalf("cannot connect to ES: %s", err) + } return client, client } diff --git a/libbeat/outputs/elasticsearch/client_proxy_test.go b/libbeat/outputs/elasticsearch/client_proxy_test.go index c2f23f34052..bd6739c3bf0 100644 --- 
a/libbeat/outputs/elasticsearch/client_proxy_test.go +++ b/libbeat/outputs/elasticsearch/client_proxy_test.go @@ -22,6 +22,7 @@ package elasticsearch import ( "bytes" + "context" "fmt" "net/http" "net/http/httptest" @@ -209,10 +210,12 @@ func doClientPing(t *testing.T) { client, err := NewClient(clientSettings, nil) require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) // This ping won't succeed; we aren't testing end-to-end communication // (which would require a lot more setup work), we just want to make sure // the client is pointed at the right server or proxy. - _ = client.Connect() + _ = client.Connect(ctx) } // serverState contains the state of the http listeners for proxy tests, diff --git a/libbeat/outputs/elasticsearch/client_test.go b/libbeat/outputs/elasticsearch/client_test.go index 5124c0defe9..abda06a02ee 100644 --- a/libbeat/outputs/elasticsearch/client_test.go +++ b/libbeat/outputs/elasticsearch/client_test.go @@ -748,8 +748,10 @@ func TestClientWithHeaders(t *testing.T) { }, nil) assert.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) // simple ping - err = client.Connect() + err = client.Connect(ctx) assert.NoError(t, err) assert.Equal(t, 1, requestCount) @@ -943,11 +945,13 @@ func TestClientWithAPIKey(t *testing.T) { }, nil) assert.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) // This connection will fail since the server doesn't return a valid // response. This is fine since we're just testing the headers in the // original client request. //nolint:errcheck // connection doesn't need to succeed - client.Connect() + client.Connect(ctx) assert.Equal(t, "ApiKey aHlva0hHNEJmV2s1dmlLWjE3Mlg6bzQ1SlVreXVTLS15aVNBdXV4bDhVdw==", headers.Get("Authorization")) } diff --git a/libbeat/outputs/failover.go b/libbeat/outputs/failover.go index 3e999e8321f..d69e01b03cc 100644 --- a/libbeat/outputs/failover.go +++ b/libbeat/outputs/failover.go @@ -54,7 +54,7 @@ func NewFailoverClient(clients []NetworkClient) NetworkClient { } } -func (f *failoverClient) Connect() error { +func (f *failoverClient) Connect(ctx context.Context) error { var ( next int active = f.active @@ -82,7 +82,7 @@ func (f *failoverClient) Connect() error { client := f.clients[next] f.active = next - return client.Connect() + return client.Connect(ctx) } func (f *failoverClient) Close() error { diff --git a/libbeat/outputs/kafka/config.go b/libbeat/outputs/kafka/config.go index 19055e0b317..c7dc74ee993 100644 --- a/libbeat/outputs/kafka/config.go +++ b/libbeat/outputs/kafka/config.go @@ -100,12 +100,18 @@ var compressionModes = map[string]sarama.CompressionCodec{ // As of sarama 1.24.1, zstd support is broken // (https://github.com/Shopify/sarama/issues/1252), which needs to be // addressed before we add support here. + + // (https://github.com/IBM/sarama/pull/1574) sarama version 1.26.0 has + // fixed this issue and elastic version of sarama has merged this commit. 
+ // (https://github.com/elastic/sarama/commit/37faed7ffc7d59e681d99cfebd1f3d453d6d607c) + "none": sarama.CompressionNone, "no": sarama.CompressionNone, "off": sarama.CompressionNone, "gzip": sarama.CompressionGZIP, "lz4": sarama.CompressionLZ4, "snappy": sarama.CompressionSnappy, + "zstd": sarama.CompressionZSTD, } func defaultConfig() kafkaConfig { diff --git a/libbeat/outputs/kafka/docs/kafka.asciidoc b/libbeat/outputs/kafka/docs/kafka.asciidoc index d1b3d937559..9907cad61c2 100644 --- a/libbeat/outputs/kafka/docs/kafka.asciidoc +++ b/libbeat/outputs/kafka/docs/kafka.asciidoc @@ -300,7 +300,7 @@ The keep-alive period for an active network connection. If 0s, keep-alives are d ===== `compression` -Sets the output compression codec. Must be one of `none`, `snappy`, `lz4` and `gzip`. The default is `gzip`. +Sets the output compression codec. Must be one of `none`, `snappy`, `lz4`, `gzip` and `zstd`. The default is `gzip`. [IMPORTANT] .Known issue with Azure Event Hub for Kafka diff --git a/libbeat/outputs/kafka/kafka_integration_test.go b/libbeat/outputs/kafka/kafka_integration_test.go index 29fc72ac859..e9abc559774 100644 --- a/libbeat/outputs/kafka/kafka_integration_test.go +++ b/libbeat/outputs/kafka/kafka_integration_test.go @@ -240,6 +240,18 @@ func TestKafkaPublish(t *testing.T) { "host": "test-host", }), }, + { + "publish message with zstd compression to test topic", + map[string]interface{}{ + "compression": "zstd", + "version": "2.2", + }, + testTopic, + single(mapstr.M{ + "host": "test-host", + "message": id, + }), + }, } defaultConfig := map[string]interface{}{ @@ -254,7 +266,10 @@ func TestKafkaPublish(t *testing.T) { cfg := makeConfig(t, defaultConfig) if test.config != nil { - cfg.Merge(makeConfig(t, test.config)) + err := cfg.Merge(makeConfig(t, test.config)) + if err != nil { + t.Fatal(err) + } } t.Run(name, func(t *testing.T) { @@ -263,7 +278,8 @@ func TestKafkaPublish(t *testing.T) { t.Fatal(err) } - output := grp.Clients[0].(*client) + output, ok := grp.Clients[0].(*client) + assert.True(t, ok, "grp.Clients[0] didn't contain a ptr to client") if err := output.Connect(); err != nil { t.Fatal(err) } @@ -279,7 +295,10 @@ func TestKafkaPublish(t *testing.T) { } wg.Add(1) - output.Publish(context.Background(), batch) + err := output.Publish(context.Background(), batch) + if err != nil { + t.Fatal(err) + } } // wait for all published batches to be ACKed @@ -335,7 +354,8 @@ func validateJSON(t *testing.T, value []byte, events []beat.Event) string { return "" } - msg := decoded["message"].(string) + msg, ok := decoded["message"].(string) + assert.True(t, ok, "type of decoded message was not string") event := findEvent(events, msg) if event == nil { t.Errorf("could not find expected event with message: %v", msg) diff --git a/libbeat/outputs/logstash/async.go b/libbeat/outputs/logstash/async.go index b1e20a0e774..a980d1cef32 100644 --- a/libbeat/outputs/logstash/async.go +++ b/libbeat/outputs/logstash/async.go @@ -91,7 +91,7 @@ func newAsyncClient( } c.connect = func() error { - err := c.Client.Connect() + err := c.Client.ConnectContext(context.Background()) if err == nil { c.client, err = clientFactory(c.Client) } @@ -116,7 +116,7 @@ func makeClientFactory( } } -func (c *asyncClient) Connect() error { +func (c *asyncClient) Connect(ctx context.Context) error { c.log.Debug("connect") return c.connect() } diff --git a/libbeat/outputs/logstash/async_test.go b/libbeat/outputs/logstash/async_test.go index 6e2a102edf2..12d2edd124c 100644 --- a/libbeat/outputs/logstash/async_test.go +++ 
b/libbeat/outputs/logstash/async_test.go @@ -72,6 +72,8 @@ func newAsyncTestDriver(client outputs.NetworkClient) *testAsyncDriver {
 go func() {
 defer driver.wg.Done()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
 for {
 cmd, ok := <-driver.ch
 if !ok {
 @@ -82,7 +84,7 @@
 case driverCmdQuit:
 return
 case driverCmdConnect:
- driver.client.Connect()
+ driver.client.Connect(ctx)
 case driverCmdClose:
 driver.client.Close()
 case driverCmdPublish:
 diff --git a/libbeat/outputs/logstash/logstash_integration_test.go b/libbeat/outputs/logstash/logstash_integration_test.go index 442145835df..286717e49ed 100644 --- a/libbeat/outputs/logstash/logstash_integration_test.go +++ b/libbeat/outputs/logstash/logstash_integration_test.go @@ -115,6 +115,11 @@ func esConnect(t *testing.T, index string) *esConnection {
 Password: password,
 Transport: transport,
 })
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+ if err := client.Connect(ctx); err != nil {
+ t.Fatalf("cannot connect to ES: %s", err)
+ }
 if err != nil {
 t.Fatal(err)
 }
 @@ -207,7 +212,9 @@ func newTestElasticsearchOutput(t *testing.T, test string) *testOutputer {
 // The Elasticsearch output requires events to be encoded
 // before calling Publish, so create an event encoder.
 es.encoder = grp.EncoderFactory()
- es.Connect()
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+ es.Connect(ctx)
 return es
 }
 diff --git a/libbeat/outputs/logstash/logstash_test.go b/libbeat/outputs/logstash/logstash_test.go index fa1b57fb841..5be2054cf2a 100644 --- a/libbeat/outputs/logstash/logstash_test.go +++ b/libbeat/outputs/logstash/logstash_test.go @@ -116,7 +116,9 @@ func testConnectionType(
 output := makeOutputer()
 t.Logf("new outputter: %v", output)
- err := output.Connect()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ err := output.Connect(ctx)
 if err != nil {
 t.Error("test client failed to connect: ", err)
 return
 @@ -186,8 +188,10 @@ newTestLumberjackOutput(
 t.Fatalf("init logstash output plugin failed: %v", err)
 }
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
 client := grp.Clients[0].(outputs.NetworkClient)
- if err := client.Connect(); err != nil {
+ if err := client.Connect(ctx); err != nil {
 t.Fatalf("Client failed to connected: %v", err)
 }
 diff --git a/libbeat/outputs/logstash/sync.go b/libbeat/outputs/logstash/sync.go index d24ab1ebb97..6a456907365 100644 --- a/libbeat/outputs/logstash/sync.go +++ b/libbeat/outputs/logstash/sync.go @@ -74,9 +74,9 @@ func newSyncClient(
 return c, nil
 }
-func (c *syncClient) Connect() error {
+func (c *syncClient) Connect(ctx context.Context) error {
 c.log.Debug("connect")
- err := c.Client.Connect()
+ err := c.Client.ConnectContext(ctx)
 if err != nil {
 return err
 }
 diff --git a/libbeat/outputs/logstash/sync_test.go b/libbeat/outputs/logstash/sync_test.go index d0410c2a8a7..0d8a3e0f513 100644 --- a/libbeat/outputs/logstash/sync_test.go +++ b/libbeat/outputs/logstash/sync_test.go @@ -86,6 +86,8 @@ func newClientTestDriver(client outputs.NetworkClient) *testSyncDriver {
 go func() {
 defer driver.wg.Done()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
 for {
 cmd, ok := <-driver.ch
 if !ok {
 @@ -96,7 +98,7 @@
 case driverCmdQuit:
 return
 case driverCmdConnect:
- driver.client.Connect()
+ driver.client.Connect(ctx)
 case
driverCmdClose: driver.client.Close() case driverCmdPublish: diff --git a/libbeat/outputs/otelconsumer/otelconsumer.go b/libbeat/outputs/otelconsumer/otelconsumer.go index 6eee8b58e65..cad11ab1442 100644 --- a/libbeat/outputs/otelconsumer/otelconsumer.go +++ b/libbeat/outputs/otelconsumer/otelconsumer.go @@ -29,6 +29,7 @@ import ( "github.com/elastic/elastic-agent-libs/mapstr" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" ) @@ -63,7 +64,7 @@ func (out *otelConsumer) Close() error { return nil } -// Publish converts Beat events to Otel format and send to the next otel consumer +// Publish converts Beat events to Otel format and sends them to the Otel collector func (out *otelConsumer) Publish(ctx context.Context, batch publisher.Batch) error { switch { case out.logsConsumer != nil: @@ -73,8 +74,7 @@ func (out *otelConsumer) Publish(ctx context.Context, batch publisher.Batch) err } } -func (out *otelConsumer) logsPublish(_ context.Context, batch publisher.Batch) error { - defer batch.ACK() +func (out *otelConsumer) logsPublish(ctx context.Context, batch publisher.Batch) error { st := out.observer pLogs := plog.NewLogs() resourceLogs := pLogs.ResourceLogs().AppendEmpty() @@ -97,10 +97,26 @@ func (out *otelConsumer) logsPublish(_ context.Context, batch publisher.Batch) e pcommonEvent.CopyTo(logRecord.Body().SetEmptyMap()) } - if err := out.logsConsumer.ConsumeLogs(context.TODO(), pLogs); err != nil { - return fmt.Errorf("error otel log consumer: %w", err) + err := out.logsConsumer.ConsumeLogs(ctx, pLogs) + if err != nil { + // Permanent errors shouldn't be retried. This typically means + // the data cannot be serialized by the exporter that is attached + // to the pipeline, or the destination refuses the data because + // it cannot decode it. Retrying in this case is useless. 
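+ // (Editorial note, not in the original patch: an error becomes
+ // "permanent" when a downstream consumer wraps it with
+ // consumererror.NewPermanent; consumererror.IsPermanent below
+ // unwraps that marker, and any unwrapped error is retried.
+ // The tests in otelconsumer_test.go exercise both paths.)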
+ // + // See https://github.com/open-telemetry/opentelemetry-collector/blob/1c47d89/receiver/doc.go#L23-L40 + if consumererror.IsPermanent(err) { + st.PermanentErrors(len(events)) + batch.Drop() + } else { + st.RetryableErrors(len(events)) + batch.Retry() + } + + return fmt.Errorf("failed to send batch events to otel collector: %w", err) } + batch.ACK() st.NewBatch(len(events)) st.AckedEvents(len(events)) return nil diff --git a/libbeat/outputs/otelconsumer/otelconsumer_test.go b/libbeat/outputs/otelconsumer/otelconsumer_test.go index 1a8c34e21a0..a18bf77e6b8 100644 --- a/libbeat/outputs/otelconsumer/otelconsumer_test.go +++ b/libbeat/outputs/otelconsumer/otelconsumer_test.go @@ -18,15 +18,103 @@ package otelconsumer import ( + "context" + "errors" "testing" "time" "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/outputs" + "github.com/elastic/beats/v7/libbeat/outputs/outest" "github.com/elastic/elastic-agent-libs/mapstr" ) +func TestPublish(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + event1 := beat.Event{Fields: mapstr.M{"field": 1}} + event2 := beat.Event{Fields: mapstr.M{"field": 2}} + event3 := beat.Event{Fields: mapstr.M{"field": 3}} + + makeOtelConsumer := func(t *testing.T, consumeFn func(ctx context.Context, ld plog.Logs) error) *otelConsumer { + t.Helper() + + logConsumer, err := consumer.NewLogs(consumeFn) + assert.NoError(t, err) + consumer := &otelConsumer{ + observer: outputs.NewNilObserver(), + logsConsumer: logConsumer, + beatInfo: beat.Info{}, + } + return consumer + } + + t.Run("ack batch on consumer success", func(t *testing.T) { + batch := outest.NewBatch(event1, event2, event3) + + var countLogs int + otelConsumer := makeOtelConsumer(t, func(ctx context.Context, ld plog.Logs) error { + countLogs = countLogs + ld.LogRecordCount() + return nil + }) + + err := otelConsumer.Publish(ctx, batch) + assert.NoError(t, err) + assert.Len(t, batch.Signals, 1) + assert.Equal(t, outest.BatchACK, batch.Signals[0].Tag) + assert.Equal(t, len(batch.Events()), countLogs, "all events should be consumed") + }) + + t.Run("retries the batch on non-permanent consumer error", func(t *testing.T) { + batch := outest.NewBatch(event1, event2, event3) + + otelConsumer := makeOtelConsumer(t, func(ctx context.Context, ld plog.Logs) error { + return errors.New("consume error") + }) + + err := otelConsumer.Publish(ctx, batch) + assert.Error(t, err) + assert.False(t, consumererror.IsPermanent(err)) + assert.Len(t, batch.Signals, 1) + assert.Equal(t, outest.BatchRetry, batch.Signals[0].Tag) + }) + + t.Run("drop batch on permanent consumer error", func(t *testing.T) { + batch := outest.NewBatch(event1, event2, event3) + + otelConsumer := makeOtelConsumer(t, func(ctx context.Context, ld plog.Logs) error { + return consumererror.NewPermanent(errors.New("consumer error")) + }) + + err := otelConsumer.Publish(ctx, batch) + assert.Error(t, err) + assert.True(t, consumererror.IsPermanent(err)) + assert.Len(t, batch.Signals, 1) + assert.Equal(t, outest.BatchDrop, batch.Signals[0].Tag) + }) + + t.Run("retries on context cancelled", func(t *testing.T) { + batch := outest.NewBatch(event1, event2, event3) + + otelConsumer := makeOtelConsumer(t, func(ctx context.Context, ld plog.Logs) error { 
+ return context.Canceled + }) + + err := otelConsumer.Publish(ctx, batch) + assert.Error(t, err) + assert.ErrorIs(t, err, context.Canceled) + assert.Len(t, batch.Signals, 1) + assert.Equal(t, outest.BatchRetry, batch.Signals[0].Tag) + }) +} + func TestMapstrToPcommonMapString(t *testing.T) { tests := map[string]struct { mapstr_val interface{} diff --git a/libbeat/outputs/outputs.go b/libbeat/outputs/outputs.go index 0fdf4d9407b..3cfdb5aef66 100644 --- a/libbeat/outputs/outputs.go +++ b/libbeat/outputs/outputs.go @@ -57,5 +57,5 @@ type Connectable interface { // The connection attempt shall report an error if no connection could be // established within the given time interval. A timeout value of 0 == wait // forever. - Connect() error + Connect(context.Context) error } diff --git a/libbeat/outputs/redis/backoff.go b/libbeat/outputs/redis/backoff.go index ef3dcd7cc48..2abc1f846f0 100644 --- a/libbeat/outputs/redis/backoff.go +++ b/libbeat/outputs/redis/backoff.go @@ -60,7 +60,7 @@ func newBackoffClient(client *client, init, max time.Duration) *backoffClient { } } -func (b *backoffClient) Connect() error { - err := b.client.Connect() +func (b *backoffClient) Connect(ctx context.Context) error { + err := b.client.Connect(ctx) if err != nil { // give the client a chance to promote an internal error to a network error. diff --git a/libbeat/outputs/redis/redis_integration_test.go b/libbeat/outputs/redis/redis_integration_test.go index dfd48dc75d2..6fd3e09397a 100644 --- a/libbeat/outputs/redis/redis_integration_test.go +++ b/libbeat/outputs/redis/redis_integration_test.go @@ -336,7 +336,9 @@ func newRedisTestingOutput(t *testing.T, cfg map[string]interface{}) outputs.Cli } client := out.Clients[0].(outputs.NetworkClient) - if err := client.Connect(); err != nil { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + if err := client.Connect(ctx); err != nil { t.Fatalf("Failed to connect to redis host: %v", err) } diff --git a/libbeat/processors/actions/alterFieldProcessor.go b/libbeat/processors/actions/alterFieldProcessor.go new file mode 100644 index 00000000000..ce525f10849 --- /dev/null +++ b/libbeat/processors/actions/alterFieldProcessor.go @@ -0,0 +1,174 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package actions + +import ( + "errors" + "fmt" + "strings" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/processors" + conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/mapstr" +) + +type alterFieldProcessor struct { + Fields []string + Values []string + IgnoreMissing bool + FailOnError bool + AlterFullField bool + + processorName string + alterFunc mapstr.AlterFunc +} + +// NewAlterFieldProcessor is an umbrella constructor for processors that alter events based on the provided fields, 
such as converting event keys to uppercase/lowercase. +func NewAlterFieldProcessor(c *conf.C, processorName string, alterFunc mapstr.AlterFunc) (beat.Processor, error) { + config := struct { + Fields []string `config:"fields"` + IgnoreMissing bool `config:"ignore_missing"` + FailOnError bool `config:"fail_on_error"` + AlterFullField bool `config:"alter_full_field"` + Values []string `config:"values"` + }{ + IgnoreMissing: false, + FailOnError: true, + AlterFullField: true, + } + + if err := c.Unpack(&config); err != nil { + return nil, fmt.Errorf("failed to unpack the %s fields configuration: %w", processorName, err) + } + + // Skip mandatory fields + var configFields []string + var lowerField string + for _, readOnly := range processors.MandatoryExportedFields { + readOnly = strings.ToLower(readOnly) + for _, field := range config.Fields { + // Skip fields that match "readOnly" or start with "readOnly." + lowerField = strings.ToLower(field) + if strings.HasPrefix(lowerField, readOnly+".") || lowerField == readOnly { + continue + } + // Add fields that do not match "readOnly" criteria + configFields = append(configFields, field) + } + } + return &alterFieldProcessor{ + Fields: configFields, + IgnoreMissing: config.IgnoreMissing, + FailOnError: config.FailOnError, + processorName: processorName, + AlterFullField: config.AlterFullField, + alterFunc: alterFunc, + Values: config.Values, + }, nil + +} + +func (a *alterFieldProcessor) String() string { + return fmt.Sprintf("%s fields=%+v", a.processorName, *a) +} + +func (a *alterFieldProcessor) Run(event *beat.Event) (*beat.Event, error) { + var backup *beat.Event + if a.FailOnError { + backup = event.Clone() + } + + for _, field := range a.Fields { + err := a.alterField(event, field) + if err != nil { + if a.IgnoreMissing && errors.Is(err, mapstr.ErrKeyNotFound) { + continue + } + if a.FailOnError { + event = backup + _, _ = event.PutValue("error.message", err.Error()) + return event, err + } + } + } + + for _, valueKey := range a.Values { + err := a.alterValue(event, valueKey) + if err != nil { + if a.IgnoreMissing && errors.Is(err, mapstr.ErrKeyNotFound) { + continue + } + if a.FailOnError { + event = backup + _, _ = event.PutValue("error.message", err.Error()) + return event, err + } + } + } + return event, nil +} + +func (a *alterFieldProcessor) alterField(event *beat.Event, field string) error { + + // modify all segments of the key + var err error + if a.AlterFullField { + err = event.Fields.AlterPath(field, mapstr.CaseInsensitiveMode, a.alterFunc) + } else { + // modify only the last segment + segmentCount := strings.Count(field, ".") + err = event.Fields.AlterPath(field, mapstr.CaseInsensitiveMode, func(key string) (string, error) { + if segmentCount > 0 { + segmentCount-- + return key, nil + } + return a.alterFunc(key) + }) + } + + return err +} + +func (a *alterFieldProcessor) alterValue(event *beat.Event, valueKey string) error { + value, err := event.GetValue(valueKey) + if err != nil { + return fmt.Errorf("could not fetch value for key: %s, Error: %w", valueKey, err) + } + + if v, ok := value.(string); ok { + err = event.Delete(valueKey) + if err != nil { + return fmt.Errorf("could not delete key: %s, %w", valueKey, err) + } + + v, err = a.alterFunc(v) + if err != nil { + return fmt.Errorf("could not alter %s successfully, %w", v, err) + } + + _, err = event.PutValue(valueKey, v) + if err != nil { + return fmt.Errorf("could not put value: %s: %v, %w", valueKey, v, err) + } + } else { + return fmt.Errorf("value of key %q is not a string", 
valueKey) + } + + return nil +} diff --git a/libbeat/processors/actions/docs/lowercase.asciidoc b/libbeat/processors/actions/docs/lowercase.asciidoc new file mode 100644 index 00000000000..be7182942d1 --- /dev/null +++ b/libbeat/processors/actions/docs/lowercase.asciidoc @@ -0,0 +1,119 @@ +[[lowercase]] +=== Lowercase fields in events + +++++ +lowercase +++++ + +The `lowercase` processor takes a list of `fields` and `values` and converts them to lowercase. Keys listed in `fields` are matched case-insensitively and converted to lowercase; for `values`, only exact, case-sensitive matches are transformed. This way, keys and values can be selectively converted based on the specified criteria. + + +==== Examples: + +1. Default scenario + +[source,yaml] +---- +processors: + - lowercase: + fields: + - "ab.cd" + values: + - "testKey" + ignore_missing: false + fail_on_error: true + alter_full_field: true +---- +[source,json] +---- +// Input +{ + "AB": {"CD":"data"}, + "CD": {"ef":"data"}, + "testKey": "TESTVALUE" +} + + +// output +{ + "ab": {"cd":"data"}, // `AB.CD` -> `ab.cd` + "CD": {"ef":"data"}, + "testKey": "testvalue" // `TESTVALUE` -> `testvalue` is lowercased +} +---- + +[start=2] +2. When `alter_full_field` is false (applicable only for fields) + +[source,yaml] +---- +processors: + - lowercase: + fields: + - "ab.cd" + ignore_missing: false + fail_on_error: true + alter_full_field: false +---- + +[source,json] +---- +// Input +{ + "AB": {"CD":"data"}, + "CD": {"ef":"data"} +} + + +// output +{ + "AB": {"cd":"data"}, // `AB.CD` -> `AB.cd` (only `cd` is lowercased) + "CD": {"ef":"data"} +} +---- + +[start=3] +3. In case of a non-unique path to the key + +[source,yaml] +---- +processors: + - lowercase: + fields: + - "ab" + ignore_missing: false + fail_on_error: true + alter_full_field: true +---- + +[source,json] +---- +// Input +{ + "ab": "first", + "aB": "second" +} + +// Output +{ + "ab": "first", + "aB": "second", + "err": "... Error: key collision" +} +---- + +==== Configuration: + +The `lowercase` processor has the following configuration settings: + +`fields`:: The field names to lowercase. The match is case-insensitive, e.g. `a.b.c.d` would match `A.b.C.d` or `A.B.C.D`. +`values`:: (Optional) Specifies the exact values to be converted to lowercase. Each entry should include the full path to the value. Key matching is case-sensitive. If the target value is not a string, an error is triggered (`fail_on_error: true`) or the value is skipped (`fail_on_error: false`). +`ignore_missing`:: (Optional) Indicates whether to ignore events that lack the source field. + The default is `false`, which will fail processing of an event if a field is missing. +`fail_on_error`:: (Optional) If set to `true` and an error occurs, the changes are reverted and the original event is returned. + If set to `false`, processing continues if an error occurs. Default is `true`. +`alter_full_field`:: (Optional) If set to `true`, the entire key path is lowercased. If set to `false`, only the final part of the key path is lowercased. The default is `true`. + + + +See <<conditions>> for a list of supported conditions. 
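Editor's aside, not part of the patch: the `alter_full_field` modes documented above map directly onto the `AlterPath` calls in alterFieldProcessor.go. A minimal runnable sketch of both modes, assuming only the `mapstr` API the patch itself uses:

[source,go]
----
package main

import (
	"fmt"
	"strings"

	"github.com/elastic/elastic-agent-libs/mapstr"
)

func main() {
	lower := func(k string) (string, error) { return strings.ToLower(k), nil }

	// With alter_full_field: true, every segment of the matched path is altered.
	full := mapstr.M{"AB": mapstr.M{"CD": "data"}}
	_ = full.AlterPath("ab.cd", mapstr.CaseInsensitiveMode, lower)
	fmt.Println(full) // AB.CD has become ab.cd

	// With alter_full_field: false, every segment but the last is skipped,
	// mirroring the segment-counting closure in alterFieldProcessor.go.
	last := mapstr.M{"AB": mapstr.M{"CD": "data"}}
	remaining := strings.Count("ab.cd", ".")
	_ = last.AlterPath("ab.cd", mapstr.CaseInsensitiveMode, func(k string) (string, error) {
		if remaining > 0 {
			remaining--
			return k, nil
		}
		return lower(k)
	})
	fmt.Println(last) // AB.CD has become AB.cd
}
----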
diff --git a/libbeat/processors/actions/drop_fields.go b/libbeat/processors/actions/drop_fields.go index a90d6438125..ba0e5434592 100644 --- a/libbeat/processors/actions/drop_fields.go +++ b/libbeat/processors/actions/drop_fields.go @@ -61,22 +61,23 @@ func newDropFields(c *conf.C) (beat.Processor, error) { return nil, fmt.Errorf("fail to unpack the drop_fields configuration: %w", err) } - /* remove read only fields */ - // TODO: Is this implementation used? If so, there's a fix needed in removal of exported fields + // Do not drop mandatory fields + var configFields []string for _, readOnly := range processors.MandatoryExportedFields { - for i, field := range config.Fields { - if readOnly == field { - config.Fields = append(config.Fields[:i], config.Fields[i+1:]...) + for _, field := range config.Fields { + if readOnly == field || strings.HasPrefix(field, readOnly+".") { + continue } + configFields = append(configFields, field) } } // Parse regexp containing fields and removes them from initial config regexpFields := make([]match.Matcher, 0) - for i := len(config.Fields) - 1; i >= 0; i-- { - field := config.Fields[i] + for i := len(configFields) - 1; i >= 0; i-- { + field := configFields[i] if strings.HasPrefix(field, "/") && strings.HasSuffix(field, "/") && len(field) > 2 { - config.Fields = append(config.Fields[:i], config.Fields[i+1:]...) + configFields = append(configFields[:i], configFields[i+1:]...) matcher, err := match.Compile(field[1 : len(field)-1]) if err != nil { @@ -87,7 +88,7 @@ func newDropFields(c *conf.C) (beat.Processor, error) { } } - f := &dropFields{Fields: config.Fields, IgnoreMissing: config.IgnoreMissing, RegexpFields: regexpFields} + f := &dropFields{Fields: configFields, IgnoreMissing: config.IgnoreMissing, RegexpFields: regexpFields} return f, nil } diff --git a/libbeat/processors/actions/drop_fields_test.go b/libbeat/processors/actions/drop_fields_test.go index d49e4561fdd..53d5a968354 100644 --- a/libbeat/processors/actions/drop_fields_test.go +++ b/libbeat/processors/actions/drop_fields_test.go @@ -24,6 +24,7 @@ import ( config2 "github.com/elastic/elastic-agent-libs/config" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/elastic-agent-libs/mapstr" @@ -50,6 +51,22 @@ func TestDropFieldRun(t *testing.T) { assert.Equal(t, event.Meta, newEvent.Meta) }) + t.Run("Do not drop mandatory fields", func(t *testing.T) { + c := config2.MustNewConfigFrom( + mapstr.M{ + "fields": []string{"field1", "type", "type.value.key", "typeKey"}, + "ignore_missing": true, + }, + ) + + p, err := newDropFields(c) + require.NoError(t, err) + process, ok := p.(*dropFields) + assert.True(t, ok) + assert.NoError(t, err) + assert.Equal(t, []string{"field1", "typeKey"}, process.Fields) + }) + t.Run("supports a metadata field", func(t *testing.T) { p := dropFields{ Fields: []string{"@metadata.meta_field"}, diff --git a/libbeat/processors/actions/lowercase.go b/libbeat/processors/actions/lowercase.go new file mode 100644 index 00000000000..3ae2cc6583e --- /dev/null +++ b/libbeat/processors/actions/lowercase.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package actions + +import ( + "strings" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/processors" + "github.com/elastic/beats/v7/libbeat/processors/checks" + conf "github.com/elastic/elastic-agent-libs/config" +) + +func init() { + processors.RegisterPlugin( + "lowercase", + checks.ConfigChecked( + NewLowerCaseProcessor, + checks.RequireFields("fields"), + checks.AllowedFields("fields", "ignore_missing", "fail_on_error", "alter_full_field", "values"), + ), + ) +} + +// NewLowerCaseProcessor converts event keys matching the provided fields to lowercase +func NewLowerCaseProcessor(c *conf.C) (beat.Processor, error) { + return NewAlterFieldProcessor(c, "lowercase", lowerCase) +} + +func lowerCase(field string) (string, error) { + return strings.ToLower(field), nil +} diff --git a/libbeat/processors/actions/lowercase_test.go b/libbeat/processors/actions/lowercase_test.go new file mode 100644 index 00000000000..6dba685caa4 --- /dev/null +++ b/libbeat/processors/actions/lowercase_test.go @@ -0,0 +1,436 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package actions
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/elastic/beats/v7/libbeat/beat"
+ conf "github.com/elastic/elastic-agent-libs/config"
+ "github.com/elastic/elastic-agent-libs/mapstr"
+)
+
+func TestNewLowerCaseProcessor(t *testing.T) { + c := conf.MustNewConfigFrom( + mapstr.M{ + "fields": []string{"field1", "type", "field2", "type.value.key", "typeKey"}, // "type" is our mandatory field + "ignore_missing": true, + "fail_on_error": false, + }, + ) + + procInt, err := NewLowerCaseProcessor(c) + assert.NoError(t, err) + + processor, ok := procInt.(*alterFieldProcessor) + assert.True(t, ok) + assert.Equal(t, []string{"field1", "field2", "typeKey"}, processor.Fields) // we discard both "type" and "type.value.key" as mandatory fields + assert.True(t, processor.IgnoreMissing) + assert.False(t, processor.FailOnError) +} + +func TestLowerCaseProcessorRun(t *testing.T) { + tests := []struct { + Name string + Fields []string + IgnoreMissing bool + FailOnError bool + FullPath bool + Input mapstr.M + Output mapstr.M + Error bool + }{ + { + Name: "Lowercase Fields", + Fields: []string{"a.b.c", "Field1"}, + IgnoreMissing: false, + FailOnError: true, + FullPath: true, + Input: mapstr.M{ + "Field1": mapstr.M{"Field2": "Value"}, + "Field3": "Value", + "a": mapstr.M{ + "B": mapstr.M{ + "C": "D", + }, + }, + }, + Output: mapstr.M{ + "field1": mapstr.M{"Field2": "Value"}, // field1 is lowercased + "Field3": "Value", + "a": mapstr.M{ + "b": mapstr.M{ + "c": "D", + }, + }, + }, + Error: false, + }, + { + Name: "Lowercase Fields when full_path is false", // searches only the most nested key 'case insensitively' + Fields: []string{"a.B.c"}, + IgnoreMissing: false, + FailOnError: true, + FullPath: false, + Input: mapstr.M{ + "Field3": "Value", + "a": mapstr.M{ + "B": mapstr.M{ + "C": "D", + }, + }, + }, + Output: mapstr.M{ + "Field3": "Value", + "a": mapstr.M{ + "B": mapstr.M{ + "c": "D", // only c is lowercased + }, + }, + }, + + Error: false, + }, + { + Name: "Revert to original map on error", + Fields: []string{"Field1", "abcbd"}, + IgnoreMissing: false, + FailOnError: true, + FullPath: true, + Input: mapstr.M{ + "Field1": "value1", + "ab": "first", + }, + Output: mapstr.M{ + "Field1": "value1", + "ab": "first", + "error": mapstr.M{"message": "could not fetch value for key: abcbd, Error: key not found"}, + }, + Error: true, + }, + { + Name: "Ignore Missing Key Error", + Fields: []string{"Field4"}, + IgnoreMissing: true, + FailOnError: true, + FullPath: true, + Input: mapstr.M{ + "Field1": mapstr.M{"Field2": "Value"}, + "Field3": "Value", + }, + Output: mapstr.M{ + "Field1": mapstr.M{"Field2": "Value"}, + "Field3": "Value", + }, + Error: false, + }, + { + Name: "Do Not Fail On Missing Key Error", + Fields: []string{"Field4"}, + IgnoreMissing: false, + FailOnError: false, + FullPath: true, + Input: mapstr.M{ + "Field1": mapstr.M{"Field2": "Value"}, + "Field3": "Value", + }, + Output: mapstr.M{ + "Field1": mapstr.M{"Field2": "Value"}, + "Field3": "Value", + }, + 
Error: false, + }, + { + Name: "Fail On Missing Key Error", + Fields: []string{"Field4"}, + IgnoreMissing: false, + FailOnError: true, + FullPath: true, + Input: mapstr.M{ + "Field1": mapstr.M{"Field2": "Value"}, + "Field3": "Value", + }, + Output: mapstr.M{ + "Field1": mapstr.M{"Field2": "Value"}, + "Field3": "Value", + "error": mapstr.M{"message": "could not fetch value for key: Field4, Error: key not found"}, + }, + Error: true, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + p := &alterFieldProcessor{ + Fields: test.Fields, + IgnoreMissing: test.IgnoreMissing, + FailOnError: test.FailOnError, + AlterFullField: test.FullPath, + alterFunc: lowerCase, + } + + event, err := p.Run(&beat.Event{Fields: test.Input}) + + if !test.Error { + require.NoError(t, err) + } else { + require.Error(t, err) + } + + assert.Equal(t, test.Output, event.Fields) + }) + } + + t.Run("test key collision", func(t *testing.T) { + Input := + mapstr.M{ + "ab": "first", + "Ab": "second", + } + + p := &alterFieldProcessor{ + Fields: []string{"ab"}, + IgnoreMissing: false, + FailOnError: true, + AlterFullField: true, + alterFunc: lowerCase, + } + + _, err := p.Run(&beat.Event{Fields: Input}) + require.Error(t, err) + assert.ErrorIs(t, err, mapstr.ErrKeyCollision) + + }) +} + +func TestLowerCaseProcessorValues(t *testing.T) { + tests := []struct { + Name string + Values []string + IgnoreMissing bool + FailOnError bool + FullPath bool + Input mapstr.M + Output mapstr.M + Error bool + }{ + { + Name: "Lowercase Values", + Values: []string{"a.b.c"}, + IgnoreMissing: false, + FailOnError: true, + FullPath: true, + Input: mapstr.M{ + "a": mapstr.M{ + "b": mapstr.M{ + "c": "D", + }, + }, + }, + Output: mapstr.M{ + "a": mapstr.M{ + "b": mapstr.M{ + "c": "d", // d is lowercased + }, + }, + }, + Error: false, + }, + { + Name: "Fail if given path to value is not a string", + Values: []string{"a.B"}, + IgnoreMissing: false, + FailOnError: true, + FullPath: true, + Input: mapstr.M{ + "Field3": "Value", + "a": mapstr.M{ + "B": mapstr.M{ + "C": "D", + }, + }, + }, + Output: mapstr.M{ + "Field3": "Value", + "a": mapstr.M{ + "B": mapstr.M{ + "C": "D", + }, + }, + "error": mapstr.M{"message": "value of key \"a.B\" is not a string"}, + }, + + Error: true, + }, + { + Name: "Fail On Missing Key Error", + Values: []string{"a.B.c"}, + IgnoreMissing: false, + FailOnError: true, + FullPath: true, + Input: mapstr.M{ + "Field3": "Value", + "a": mapstr.M{ + "B": mapstr.M{ + "C": "D", + }, + }, + }, + Output: mapstr.M{ + "Field3": "Value", + "a": mapstr.M{ + "B": mapstr.M{ + "C": "D", + }, + }, + "error": mapstr.M{"message": "could not fetch value for key: a.B.c, Error: key not found"}, + }, + + Error: true, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + p := &alterFieldProcessor{ + Values: test.Values, + IgnoreMissing: test.IgnoreMissing, + FailOnError: test.FailOnError, + AlterFullField: test.FullPath, + alterFunc: lowerCase, + } + + event, err := p.Run(&beat.Event{Fields: test.Input}) + + if !test.Error { + require.NoError(t, err) + } else { + require.Error(t, err) + } + + assert.Equal(t, test.Output, event.Fields) + }) + } +} +func BenchmarkLowerCaseProcessorRun(b *testing.B) { + tests := []struct { + Name string + Events []beat.Event + }{ + { + Name: "5000 events with 5 fields on each level with 3 level depth without collisions", + Events: GenerateEvents(5000, 5, 3, false), + }, + { + Name: "5000 events with 5 fields on each level with 3 level depth with collisions", + 
Events: GenerateEvents(5000, 5, 3, true), + }, + { + Name: "500 events with 50 fields on each level with 3 level depth without collisions", + Events: GenerateEvents(500, 50, 3, false), + }, + { + Name: "500 events with 50 fields on each level with 3 level depth with collisions", + Events: GenerateEvents(500, 50, 3, true), + }, + // Add more test cases as needed for benchmarking + } + + for _, tt := range tests { + b.Run(tt.Name, func(b *testing.B) { + p := &alterFieldProcessor{ + Fields: []string{"level1field1.level2field1.level3field1"}, + alterFunc: lowerCase, + AlterFullField: true, + IgnoreMissing: false, + FailOnError: true, + } + for i := 0; i < b.N; i++ { + // Run the processor over every generated input event + for _, e := range tt.Events { + ev := e + _, err := p.Run(&ev) + require.NoError(b, err) + } + + } + }) + } +} + +func GenerateEvents(numEvents, fieldsPerLevel, depth int, withCollisions bool) []beat.Event { + events := make([]beat.Event, numEvents) + for i := 0; i < numEvents; i++ { + event := &beat.Event{Fields: mapstr.M{}} + generateFields(event, fieldsPerLevel, depth, withCollisions) + events[i] = *event + } + return events +} + +func generateFields(event *beat.Event, fieldsPerLevel, depth int, withCollisions bool) { + if depth == 0 { + return + } + + for j := 1; j <= fieldsPerLevel; j++ { + var key string + for d := 1; d < depth; d++ { + key += fmt.Sprintf("level%dfield%d", d, j) + key += "." + } + if withCollisions { + key += fmt.Sprintf("Level%dField%d", depth, j) // Creating a collision (Level is capitalized) + } else { + key += fmt.Sprintf("level%dfield%d", depth, j) + } + event.Fields.Put(key, "value") + key = "" + } + +} diff --git a/libbeat/processors/fingerprint/fingerprint_test.go b/libbeat/processors/fingerprint/fingerprint_test.go index 5f6bdb70b5e..8d66762e479 100644 --- a/libbeat/processors/fingerprint/fingerprint_test.go +++ b/libbeat/processors/fingerprint/fingerprint_test.go @@ -505,7 +505,7 @@ func nRandomEvents(num int) []beat.Event { charsetLen := len(charset) b := make([]byte, 200) - events := make([]beat.Event, num) + events := make([]beat.Event, 0, num) for i := 0; i < num; i++ { for j := range b { b[j] = charset[prng.Intn(charsetLen)] diff --git a/libbeat/processors/ratelimit/limit.go b/libbeat/processors/ratelimit/limit.go index 0d6f14d88d7..c989e4a2527 100644 --- a/libbeat/processors/ratelimit/limit.go +++ b/libbeat/processors/ratelimit/limit.go @@ -52,7 +52,7 @@ func (l *rate) Unpack(str string) error { } if allowed := []unit{unitPerSecond, unitPerMinute, unitPerHour}; !contains(allowed, unitStr) { - allowedStrs := make([]string, len(allowed)) + allowedStrs := make([]string, 0, len(allowed)) for _, a := range allowed { allowedStrs = append(allowedStrs, "/"+string(a)) } diff --git a/libbeat/processors/translate_ldap_attribute/config.go b/libbeat/processors/translate_ldap_attribute/config.go new file mode 100644 index 00000000000..b6b46410e98 --- /dev/null +++ b/libbeat/processors/translate_ldap_attribute/config.go @@ -0,0 +1,45 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package translate_ldap_attribute + +import ( + "github.com/elastic/elastic-agent-libs/transport/tlscommon" +) + +type config struct { + Field string `config:"field" validate:"required"` + TargetField string `config:"target_field"` + LDAPAddress string `config:"ldap_address" validate:"required"` + LDAPBaseDN string `config:"ldap_base_dn" validate:"required"` + LDAPBindUser string `config:"ldap_bind_user"` + LDAPBindPassword string `config:"ldap_bind_password"` + LDAPSearchAttribute string `config:"ldap_search_attribute" validate:"required"` + LDAPMappedAttribute string `config:"ldap_mapped_attribute" validate:"required"` + LDAPSearchTimeLimit int `config:"ldap_search_time_limit"` + LDAPTLS *tlscommon.Config `config:"ldap_ssl"` + + IgnoreMissing bool `config:"ignore_missing"` + IgnoreFailure bool `config:"ignore_failure"` +} + +func defaultConfig() config { + return config{ + LDAPSearchAttribute: "objectGUID", + LDAPMappedAttribute: "cn", + LDAPSearchTimeLimit: 30} +} diff --git a/libbeat/processors/translate_ldap_attribute/doc.go b/libbeat/processors/translate_ldap_attribute/doc.go new file mode 100644 index 00000000000..70ceee7297d --- /dev/null +++ b/libbeat/processors/translate_ldap_attribute/doc.go @@ -0,0 +1,21 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package translate_ldap_attribute provides a Beat processor for converting +// LDAP attributes from one to another. It is typically used for converting Windows +// Global Unique Identifiers (GUIDs) to object names. +package translate_ldap_attribute diff --git a/libbeat/processors/translate_ldap_attribute/docs/translate_ldap_attribute.asciidoc b/libbeat/processors/translate_ldap_attribute/docs/translate_ldap_attribute.asciidoc new file mode 100644 index 00000000000..aff1125f43a --- /dev/null +++ b/libbeat/processors/translate_ldap_attribute/docs/translate_ldap_attribute.asciidoc @@ -0,0 +1,92 @@ +[[processor-translate-guid]] +=== Translate GUID + +++++ +translate_ldap_attribute +++++ + +The `translate_ldap_attribute` processor translates one LDAP attribute into another. +It is typically used to translate AD Global Unique Identifiers (GUIDs) +into their common names. + +Every object on an Active Directory or an LDAP server is issued a GUID. Internal processes +refer to objects by their GUIDs rather than by their names, and these values +sometimes appear in logs. 
+
+If the search attribute is invalid (malformed) or does not map to any object in the domain,
+the processor returns an error unless `ignore_failure`
+is set.
+
+The result of this operation is an array of values, given that a single attribute
+can hold multiple values.
+
+Note: the search attribute is expected to map to a single object. If it doesn't,
+no error is returned, but only the results of the first entry are added
+to the event.
+
+[source,yaml]
+----
+processors:
+  - translate_ldap_attribute:
+      field: winlog.event_data.ObjectGuid
+      ldap_address: "ldap://"
+      ldap_base_dn: "dc=example,dc=com"
+      ignore_missing: true
+      ignore_failure: true
+----
+
+The `translate_ldap_attribute` processor has the following configuration settings:
+
+.Translate GUID options
+[options="header"]
+|======
+| Name | Required | Default | Description
+| `field` | yes | | Source field containing a GUID.
+| `target_field` | no | | Target field for the mapped attribute value. If not set it will be replaced in place.
+| `ldap_address` | yes | | LDAP server address, e.g. `ldap://ds.example.com:389`
+| `ldap_base_dn` | yes | | LDAP base DN, e.g. `dc=example,dc=com`
+| `ldap_bind_user` | no | | LDAP user.
+| `ldap_bind_password` | no | | LDAP password.
+| `ldap_search_attribute` | yes | `objectGUID` | LDAP attribute to search by.
+| `ldap_mapped_attribute` | yes | `cn` | LDAP attribute to map to.
+| `ldap_search_time_limit` | no | 30 | LDAP search time limit in seconds.
+| `ldap_ssl`* | no | | LDAP TLS/SSL connection settings.
+| `ignore_missing` | no | false | Ignore errors when the source field is missing.
+| `ignore_failure` | no | false | Ignore all errors produced by the processor.
+|======
+
+* Also see <<configuration-ssl>> for a full description of the `ldap_ssl` options.
+
+If searches are slow or you expect a large number of distinct key attributes,
+consider using a cache processor to speed up processing:
+
+
+[source,yaml]
+-------------------------------------------------------------------------------
+processors:
+  - cache:
+      backend:
+        memory:
+          id: ldapguids
+      get:
+        key_field: winlog.event_data.ObjectGuid
+        target_field: winlog.common_name
+      ignore_missing: true
+  - if:
+      not:
+        - has_fields: winlog.common_name
+    then:
+      - translate_ldap_attribute:
+          field: winlog.event_data.ObjectGuid
+          target_field: winlog.common_name
+          ldap_address: "ldap://"
+          ldap_base_dn: "dc=example,dc=com"
+  - cache:
+      backend:
+        memory:
+          id: ldapguids
+        capacity: 10000
+      put:
+        key_field: winlog.event_data.ObjectGuid
+        value_field: winlog.common_name
+------------------------------------------------------------------------------- \ No newline at end of file diff --git a/libbeat/processors/translate_ldap_attribute/ldap.go b/libbeat/processors/translate_ldap_attribute/ldap.go new file mode 100644 index 00000000000..f83200e6652 --- /dev/null +++ b/libbeat/processors/translate_ldap_attribute/ldap.go @@ -0,0 +1,139 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package translate_ldap_attribute + +import ( + "crypto/tls" + "fmt" + "sync" + + "github.com/go-ldap/ldap/v3" +) + +// ldapClient manages a single reusable LDAP connection +type ldapClient struct { + conn *ldap.Conn + mu sync.Mutex + *ldapConfig +} + +type ldapConfig struct { + address string + baseDN string + username string + password string + searchAttr string + mappedAttr string + searchTimeLimit int + tlsConfig *tls.Config +} + +// newLDAPClient initializes a new ldapClient with a single connection +func newLDAPClient(config *ldapConfig) (*ldapClient, error) { + client := &ldapClient{ldapConfig: config} + + // Establish initial connection + if err := client.connect(); err != nil { + return nil, err + } + + return client, nil +} + +// connect establishes a new connection to the LDAP server +func (client *ldapClient) connect() error { + client.mu.Lock() + defer client.mu.Unlock() + + // Connect with or without TLS based on configuration + var opts []ldap.DialOpt + if client.tlsConfig != nil { + opts = append(opts, ldap.DialWithTLSConfig(client.tlsConfig)) + } + conn, err := ldap.DialURL(client.address, opts...) + if err != nil { + return fmt.Errorf("failed to dial LDAP server: %w", err) + } + + if client.password != "" { + err = conn.Bind(client.username, client.password) + } else { + err = conn.UnauthenticatedBind(client.username) + } + + if err != nil { + conn.Close() + return fmt.Errorf("failed to bind to LDAP server: %w", err) + } + + client.conn = conn + return nil +} + +// reconnect checks the connection's health and reconnects if necessary +func (client *ldapClient) reconnect() error { + client.mu.Lock() + closing := client.conn.IsClosing() + client.mu.Unlock() + + // connect() takes the mutex itself, so it must be called with the + // lock released to avoid a self-deadlock. + if closing { + return client.connect() + } + return nil +} + +// findObjectBy searches for an object and returns its mapped values. 
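+// It first heals the connection via reconnect(), which takes and releases
+// the mutex, and then holds the mutex for the search itself, so concurrent
+// Run calls share a single LDAP connection instead of dialing per event.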
+func (client *ldapClient) findObjectBy(searchBy string) ([]string, error) { + // Ensure the connection is alive or reconnect if necessary + if err := client.reconnect(); err != nil { + return nil, fmt.Errorf("failed to reconnect: %w", err) + } + + client.mu.Lock() + defer client.mu.Unlock() + + // Format the filter and perform the search + filter := fmt.Sprintf("(%s=%s)", client.searchAttr, searchBy) + searchRequest := ldap.NewSearchRequest( + client.baseDN, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 1, client.searchTimeLimit, false, + filter, []string{client.mappedAttr}, nil, + ) + + // Execute search + result, err := client.conn.Search(searchRequest) + if err != nil { + return nil, fmt.Errorf("search failed: %w", err) + } + if len(result.Entries) == 0 { + return nil, fmt.Errorf("no entries found for search value %s", searchBy) + } + + // Retrieve the values of the mapped attribute from the first entry + cn := result.Entries[0].GetAttributeValues(client.mappedAttr) + return cn, nil +} + +// close closes the LDAP connection +func (client *ldapClient) close() { + client.mu.Lock() + defer client.mu.Unlock() + if client.conn != nil { + client.conn.Close() + } +} diff --git a/libbeat/processors/translate_ldap_attribute/translate_ldap_attribute.go b/libbeat/processors/translate_ldap_attribute/translate_ldap_attribute.go new file mode 100644 index 00000000000..dec72263cfd --- /dev/null +++ b/libbeat/processors/translate_ldap_attribute/translate_ldap_attribute.go @@ -0,0 +1,125 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
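Editor's aside, not part of the patch: for readers unfamiliar with go-ldap, one findObjectBy round trip amounts to roughly the following. Only calls already used in ldap.go above appear here; the address, base DN, and filter value are placeholder assumptions taken from the documentation example.

[source,go]
----
package main

import (
	"fmt"
	"log"

	"github.com/go-ldap/ldap/v3"
)

func main() {
	conn, err := ldap.DialURL("ldap://ds.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Size limit 1 and a 30s time limit mirror the processor defaults;
	// the filter value stands in for the GUID read from the event.
	req := ldap.NewSearchRequest(
		"dc=example,dc=com",
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 1, 30, false,
		"(objectGUID=<guid-from-event>)",
		[]string{"cn"}, nil,
	)

	res, err := conn.Search(req)
	if err != nil {
		log.Fatal(err)
	}
	if len(res.Entries) > 0 {
		fmt.Println(res.Entries[0].GetAttributeValues("cn"))
	}
}
----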
+ +package translate_ldap_attribute + +import ( + "errors" + "fmt" + + "github.com/elastic/beats/v7/libbeat/beat" + "github.com/elastic/beats/v7/libbeat/processors" + jsprocessor "github.com/elastic/beats/v7/libbeat/processors/script/javascript/module/processor" + conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent-libs/mapstr" + "github.com/elastic/elastic-agent-libs/transport/tlscommon" +) + +const logName = "processor.translate_ldap_attribute" + +var errInvalidType = errors.New("search attribute field value is not a string") + +func init() { + processors.RegisterPlugin("translate_ldap_attribute", New) + jsprocessor.RegisterPlugin("TranslateLDAPAttribute", New) +} + +type processor struct { + config + client *ldapClient + log *logp.Logger +} + +func New(cfg *conf.C) (beat.Processor, error) { + c := defaultConfig() + if err := cfg.Unpack(&c); err != nil { + return nil, fmt.Errorf("fail to unpack the translate_ldap_attribute configuration: %w", err) + } + + return newFromConfig(c) +} + +func newFromConfig(c config) (*processor, error) { + ldapConfig := &ldapConfig{ + address: c.LDAPAddress, + baseDN: c.LDAPBaseDN, + username: c.LDAPBindUser, + password: c.LDAPBindPassword, + searchAttr: c.LDAPSearchAttribute, + mappedAttr: c.LDAPMappedAttribute, + searchTimeLimit: c.LDAPSearchTimeLimit, + } + if c.LDAPTLS != nil { + tlsConfig, err := tlscommon.LoadTLSConfig(c.LDAPTLS) + if err != nil { + return nil, fmt.Errorf("could not load provided LDAP TLS configuration: %w", err) + } + ldapConfig.tlsConfig = tlsConfig.ToConfig() + } + client, err := newLDAPClient(ldapConfig) + if err != nil { + return nil, err + } + return &processor{ + config: c, + client: client, + log: logp.NewLogger(logName), + }, nil +} + +func (p *processor) String() string { + return fmt.Sprintf("translate_ldap_attribute=[field=%s, ldap_address=%s, ldap_base_dn=%s, ldap_bind_user=%s, ldap_search_attribute=%s, ldap_mapped_attribute=%s]", + p.Field, p.LDAPAddress, p.LDAPBaseDN, p.LDAPBindUser, p.LDAPSearchAttribute, p.LDAPMappedAttribute) +} + +func (p *processor) Run(event *beat.Event) (*beat.Event, error) { + err := p.translateLDAPAttr(event) + if err == nil || p.IgnoreFailure || (p.IgnoreMissing && errors.Is(err, mapstr.ErrKeyNotFound)) { + return event, nil + } + return event, err +} + +func (p *processor) translateLDAPAttr(event *beat.Event) error { + v, err := event.GetValue(p.Field) + if err != nil { + return err + } + + guidString, ok := v.(string) + if !ok { + return errInvalidType + } + + cn, err := p.client.findObjectBy(guidString) + if err != nil { + return err + } + + field := p.Field + if p.TargetField != "" { + field = p.TargetField + } + _, err = event.PutValue(field, cn) + return err +} + +func (p *processor) Close() error { + p.client.close() + return nil +} diff --git a/libbeat/publisher/pipeline/client_worker.go b/libbeat/publisher/pipeline/client_worker.go index e05658d9749..3e6b8202dd2 100644 --- a/libbeat/publisher/pipeline/client_worker.go +++ b/libbeat/publisher/pipeline/client_worker.go @@ -29,8 +29,8 @@ import ( ) type worker struct { - qu chan publisher.Batch - done chan struct{} + qu chan publisher.Batch + cancel func() } // clientWorker manages output client of type outputs.Client, not supporting reconnect. 
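Editor's aside, not part of the patch: the hunk above replaces the worker's done channel with a stored context cancel function. A self-contained sketch of the resulting lifecycle, using hypothetical toy names rather than the real pipeline types:

[source,go]
----
package main

import (
	"context"
	"fmt"
	"time"
)

// toyWorker mirrors the new pattern: the goroutine watches ctx.Done()
// instead of a dedicated done channel, and close() becomes cancel().
type toyWorker struct {
	qu     chan string
	cancel func()
}

func newToyWorker() *toyWorker {
	ctx, cancel := context.WithCancel(context.Background())
	w := &toyWorker{qu: make(chan string), cancel: cancel}
	go w.run(ctx)
	return w
}

func (w *toyWorker) run(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			fmt.Println("worker stopped")
			return
		case msg := <-w.qu:
			fmt.Println("publish:", msg)
		}
	}
}

func (w *toyWorker) close() { w.cancel() }

func main() {
	w := newToyWorker()
	w.qu <- "batch-1"
	w.close()
	time.Sleep(50 * time.Millisecond) // let the goroutine observe cancellation
}
----

The payoff, visible in the hunks that follow, is that the same ctx now flows into Connect and Publish, so closing the worker can also abort an in-flight connection attempt rather than only stopping the loop.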
@@ -50,14 +50,15 @@ type netClientWorker struct { } func makeClientWorker(qu chan publisher.Batch, client outputs.Client, logger logger, tracer *apm.Tracer) outputWorker { + ctx, cancel := context.WithCancel(context.Background()) w := worker{ - qu: qu, - done: make(chan struct{}), + qu: qu, + cancel: cancel, } var c interface { outputWorker - run() + run(context.Context) } if nc, ok := client.(outputs.NetworkClient); ok { @@ -71,12 +72,12 @@ func makeClientWorker(qu chan publisher.Batch, client outputs.Client, logger log c = &clientWorker{worker: w, client: client} } - go c.run() + go c.run(ctx) return c } func (w *worker) close() { - close(w.done) + w.cancel() } func (w *clientWorker) Close() error { @@ -84,20 +85,20 @@ func (w *clientWorker) Close() error { return w.client.Close() } -func (w *clientWorker) run() { +func (w *clientWorker) run(ctx context.Context) { for { // We wait for either the worker to be closed or for there to be a batch of // events to publish. select { - case <-w.done: + case <-ctx.Done(): return case batch := <-w.qu: if batch == nil { continue } - if err := w.client.Publish(context.TODO(), batch); err != nil { + if err := w.client.Publish(ctx, batch); err != nil { return } } @@ -109,7 +110,7 @@ func (w *netClientWorker) Close() error { return w.client.Close() } -func (w *netClientWorker) run() { +func (w *netClientWorker) run(ctx context.Context) { var ( connected = false reconnectAttempts = 0 @@ -120,7 +121,7 @@ func (w *netClientWorker) run() { // events to publish. select { - case <-w.done: + case <-ctx.Done(): return case batch := <-w.qu: @@ -139,7 +140,7 @@ func (w *netClientWorker) run() { w.logger.Infof("Attempting to reconnect to %v with %d reconnect attempt(s)", w.client, reconnectAttempts) } - err := w.client.Connect() + err := w.client.Connect(ctx) connected = err == nil if connected { w.logger.Infof("Connection to %v established", w.client) @@ -152,15 +153,14 @@ func (w *netClientWorker) run() { continue } - if err := w.publishBatch(batch); err != nil { + if err := w.publishBatch(ctx, batch); err != nil { connected = false } } } } -func (w *netClientWorker) publishBatch(batch publisher.Batch) error { - ctx := context.Background() +func (w *netClientWorker) publishBatch(ctx context.Context, batch publisher.Batch) error { if w.tracer != nil && w.tracer.Recording() { tx := w.tracer.StartTransaction("publish", "output") defer tx.End() diff --git a/libbeat/publisher/pipeline/testing.go b/libbeat/publisher/pipeline/testing.go index ca357646a81..61977377a75 100644 --- a/libbeat/publisher/pipeline/testing.go +++ b/libbeat/publisher/pipeline/testing.go @@ -54,7 +54,7 @@ type mockNetworkClient struct { outputs.Client } -func (c *mockNetworkClient) Connect() error { return nil } +func (c *mockNetworkClient) Connect(_ context.Context) error { return nil } type mockBatch struct { mu sync.Mutex diff --git a/libbeat/publisher/pipeline/ttl_batch.go b/libbeat/publisher/pipeline/ttl_batch.go index dab77fa5659..0ef4408b613 100644 --- a/libbeat/publisher/pipeline/ttl_batch.go +++ b/libbeat/publisher/pipeline/ttl_batch.go @@ -77,6 +77,7 @@ func newBatch(retryer retryer, original queue.Batch, ttl int) *ttlBatch { events = append(events, event) } } + original.FreeEntries() b := &ttlBatch{ done: original.Done, diff --git a/libbeat/publisher/pipeline/ttl_batch_test.go b/libbeat/publisher/pipeline/ttl_batch_test.go index 769ccc37c35..4c5207acbb0 100644 --- a/libbeat/publisher/pipeline/ttl_batch_test.go +++ b/libbeat/publisher/pipeline/ttl_batch_test.go @@ -112,6 +112,12 @@ func 
TestBatchCallsDoneAndFreesEvents(t *testing.T) { require.True(t, doneCalled, "Calling batch.Drop should invoke the done callback") } +func TestNewBatchFreesEvents(t *testing.T) { + queueBatch := &mockQueueBatch{} + _ = newBatch(nil, queueBatch, 0) + assert.Equal(t, 1, queueBatch.freeEntriesCalled, "Creating a new ttlBatch should call FreeEntries on the underlying queue.Batch") +} + type mockQueueBatch struct { freeEntriesCalled int } @@ -127,6 +133,10 @@ func (b *mockQueueBatch) Entry(i int) queue.Entry { return fmt.Sprintf("event %v", i) } +func (b *mockQueueBatch) FreeEntries() { + b.freeEntriesCalled++ +} + type mockRetryer struct { batches []*ttlBatch } diff --git a/libbeat/publisher/queue/diskqueue/consumer.go b/libbeat/publisher/queue/diskqueue/consumer.go index 20e6648d927..a0e5e944df3 100644 --- a/libbeat/publisher/queue/diskqueue/consumer.go +++ b/libbeat/publisher/queue/diskqueue/consumer.go @@ -97,6 +97,9 @@ func (batch *diskQueueBatch) Entry(i int) queue.Entry { return batch.frames[i].event } +func (batch *diskQueueBatch) FreeEntries() { +} + func (batch *diskQueueBatch) Done() { batch.queue.acks.addFrames(batch.frames) } diff --git a/libbeat/publisher/queue/memqueue/broker.go b/libbeat/publisher/queue/memqueue/broker.go index b617bae6110..3e3e47e502c 100644 --- a/libbeat/publisher/queue/memqueue/broker.go +++ b/libbeat/publisher/queue/memqueue/broker.go @@ -398,6 +398,15 @@ func (b *batch) Entry(i int) queue.Entry { return b.rawEntry(i).event } +func (b *batch) FreeEntries() { + // This signals that the event data has been copied out of the batch, and is + // safe to free from the queue buffer, so set all the event pointers to nil. + for i := 0; i < b.count; i++ { + index := (b.start + i) % len(b.queue.buf) + b.queue.buf[index].event = nil + } +} + func (b *batch) Done() { b.doneChan <- batchDoneMsg{} } diff --git a/libbeat/publisher/queue/memqueue/queue_test.go b/libbeat/publisher/queue/memqueue/queue_test.go index 9cd209bbd51..168c923e598 100644 --- a/libbeat/publisher/queue/memqueue/queue_test.go +++ b/libbeat/publisher/queue/memqueue/queue_test.go @@ -262,3 +262,41 @@ func TestAdjustInputQueueSize(t *testing.T) { assert.Equal(t, int(float64(mainQueue)*maxInputQueueSizeRatio), AdjustInputQueueSize(mainQueue, mainQueue)) }) } + +func TestBatchFreeEntries(t *testing.T) { + const queueSize = 10 + const batchSize = 5 + // 1. Add 10 events to the queue, request two batches with 5 events each + // 2. Make sure the queue buffer has 10 non-nil events + // 3. Call FreeEntries on the second batch + // 4. Make sure only events 6-10 are nil + // 5. Call FreeEntries on the first batch + // 6. Make sure all events are nil + testQueue := NewQueue(nil, nil, Settings{Events: queueSize, MaxGetRequest: batchSize, FlushTimeout: time.Second}, 0, nil) + producer := testQueue.Producer(queue.ProducerConfig{}) + for i := 0; i < queueSize; i++ { + _, ok := producer.Publish(i) + require.True(t, ok, "Queue publish must succeed") + } + batch1, err := testQueue.Get(batchSize) + require.NoError(t, err, "Queue read must succeed") + require.Equal(t, batchSize, batch1.Count(), "Returned batch size must match request") + batch2, err := testQueue.Get(batchSize) + require.NoError(t, err, "Queue read must succeed") + require.Equal(t, batchSize, batch2.Count(), "Returned batch size must match request") + // Slight concurrency subtlety: we check events are non-nil after the queue + // reads, since if we do it before we have no way to be sure the insert + // has been completed. 
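+ // (Editorial note, not in the original patch: FreeEntries exists so the
+ // queue buffer can drop its event pointers as soon as an output has
+ // copied or encoded the events, letting the GC reclaim event memory
+ // while the batch is still waiting to be acknowledged.)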
+ for i := 0; i < queueSize; i++ { + require.NotNil(t, testQueue.buf[i].event, "All queue events must be non-nil") + } + batch2.FreeEntries() + for i := 0; i < batchSize; i++ { + require.NotNilf(t, testQueue.buf[i].event, "Queue index %v: batch 1's events should be unaffected by calling FreeEntries on Batch 2", i) + require.Nilf(t, testQueue.buf[batchSize+i].event, "Queue index %v: batch 2's events should be nil after FreeEntries", batchSize+i) + } + batch1.FreeEntries() + for i := 0; i < queueSize; i++ { + require.Nilf(t, testQueue.buf[i].event, "Queue index %v: all events should be nil after calling FreeEntries on both batches", i) + } +} diff --git a/libbeat/publisher/queue/queue.go b/libbeat/publisher/queue/queue.go index 075d7ad66a4..983a835a069 100644 --- a/libbeat/publisher/queue/queue.go +++ b/libbeat/publisher/queue/queue.go @@ -112,6 +112,10 @@ type Batch interface { Count() int Entry(i int) Entry Done() + // Release internal references to the contained events if supported + // (the disk queue does not currently implement this). + // Entry() should not be used after this call. + FreeEntries() } // Outputs can provide an EncoderFactory to enable early encoding, in which diff --git a/libbeat/template/load_integration_test.go b/libbeat/template/load_integration_test.go index b3aafad5d69..4705f9be5a8 100644 --- a/libbeat/template/load_integration_test.go +++ b/libbeat/template/load_integration_test.go @@ -20,6 +20,7 @@ package template import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -66,7 +67,9 @@ func newTestSetup(t *testing.T, cfg TemplateConfig) *testSetup { cfg.Name = fmt.Sprintf("load-test-%+v", rand.Int()) } client := getTestingElasticsearch(t) - if err := client.Connect(); err != nil { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + if err := client.Connect(ctx); err != nil { t.Fatal(err) } handler := &mockClientHandler{serverless: false, mode: lifecycle.ILM} @@ -554,7 +557,9 @@ func getTestingElasticsearch(t eslegtest.TestLogger) *eslegclient.Connection { conn.Encoder = eslegclient.NewJSONEncoder(nil, false) - err = conn.Connect() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + err = conn.Connect(ctx) if err != nil { t.Fatal(err) panic(err) // panic in case TestLogger did not stop test @@ -586,7 +591,9 @@ func getMockElasticsearchClient(t *testing.T, method, endpoint string, code int, Transport: httpcommon.DefaultHTTPTransportSettings(), }) require.NoError(t, err) - err = conn.Connect() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + err = conn.Connect(ctx) require.NoError(t, err) return conn } diff --git a/libbeat/tests/integration/elasticsearch_test.go b/libbeat/tests/integration/elasticsearch_test.go new file mode 100644 index 00000000000..6d8d1a46a08 --- /dev/null +++ b/libbeat/tests/integration/elasticsearch_test.go @@ -0,0 +1,148 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build integration
+
+package integration
+
+import (
+	"errors"
+	"io"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/gofrs/uuid/v5"
+	"github.com/rcrowley/go-metrics"
+	"github.com/stretchr/testify/require"
+
+	"github.com/elastic/mock-es/pkg/api"
+)
+
+var esCfg = `
+mockbeat:
+logging:
+  level: debug
+  selectors:
+    - publisher_pipeline_output
+    - esclientleg
+queue.mem:
+  events: 4096
+  flush.min_events: 8
+  flush.timeout: 0.1s
+output.elasticsearch:
+  allow_older_versions: true
+  hosts:
+    - "http://localhost:4242"
+  backoff:
+    init: 0.1s
+    max: 0.2s
+`
+
+func TestESOutputRecoversFromNetworkError(t *testing.T) {
+	mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test")
+	mockbeat.WriteConfigFile(esCfg)
+
+	s, mr := startMockES(t, "localhost:4242")
+
+	mockbeat.Start()
+
+	// 1. Wait for one _bulk call
+	waitForEventToBePublished(t, mr)
+
+	// 2. Stop the mock-es server
+	if err := s.Close(); err != nil {
+		t.Fatalf("cannot close mock-es server: %s", err)
+	}
+
+	// 3. Wait for connection error logs
+	mockbeat.WaitForLogs(
+		`Get \"http://localhost:4242\": dial tcp 127.0.0.1:4242: connect: connection refused`,
+		2*time.Second,
+		"did not find connection refused error")
+
+	mockbeat.WaitForLogs(
+		"Attempting to reconnect to backoff(elasticsearch(http://localhost:4242)) with 2 reconnect attempt(s)",
+		2*time.Second,
+		"did not find two tries to reconnect")
+
+	// 4. Restart mock-es on the same port
+	s, mr = startMockES(t, "localhost:4242")
+
+	// 5. Wait for reconnection logs
+	mockbeat.WaitForLogs(
+		"Connection to backoff(elasticsearch(http://localhost:4242)) established",
+		5*time.Second, // There is a backoff, so ensure we wait enough
+		"did not find reconnection confirmation")
+
+	// 6. Ensure one new call to _bulk is made
+	waitForEventToBePublished(t, mr)
+	s.Close()
+}
+
+func startMockES(t *testing.T, addr string) (*http.Server, metrics.Registry) {
+	uid := uuid.Must(uuid.NewV4())
+	mr := metrics.NewRegistry()
+	es := api.NewAPIHandler(uid, "foo2", mr, time.Now().Add(24*time.Hour), 0, 0, 0, 0, 0)
+
+	s := http.Server{Addr: addr, Handler: es, ReadHeaderTimeout: time.Second}
+	go func() {
+		if err := s.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
+			t.Errorf("could not start mock-es server: %s", err)
+		}
+	}()
+
+	require.Eventually(t, func() bool {
+		resp, err := http.Get("http://" + addr) //nolint: noctx // It's just a test
+		if err != nil {
+			return false
+		}
+		//nolint: errcheck // We're just draining the body, we can ignore the error
+		io.Copy(io.Discard, resp.Body)
+		resp.Body.Close()
+		return true
+	},
+		time.Second, time.Millisecond, "mock-es server did not start on '%s'", addr)
+
+	return &s, mr
+}
+
+// waitForEventToBePublished waits for at least one event published
+// by inspecting the count for `bulk.create.total` in `mr`. Once
+// the counter is > 1, waitForEventToBePublished returns. If that
+// does not happen within 10s, then the test fails with a call to
+// t.Fatal.
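+// A typical call site, mirroring TestESOutputRecoversFromNetworkError above:
+//
+//	s, mr := startMockES(t, "localhost:4242")
+//	waitForEventToBePublished(t, mr) // blocks until _bulk create requests are counted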
+func waitForEventToBePublished(t *testing.T, mr metrics.Registry) {
+	t.Helper()
+	require.Eventually(t, func() bool {
+		total := mr.Get("bulk.create.total")
+		if total == nil {
+			return false
+		}
+
+		sc, ok := total.(*metrics.StandardCounter)
+		if !ok {
+			t.Fatalf("expecting 'bulk.create.total' to be *metrics.StandardCounter, but got '%T' instead",
+				total,
+			)
+		}
+
+		return sc.Count() > 1
+	},
+		10*time.Second, 100*time.Millisecond,
+		"at least one bulk request must be made")
+}
diff --git a/libbeat/tests/integration/framework.go b/libbeat/tests/integration/framework.go
index 9b8002f1176..904fc1e302a 100644
--- a/libbeat/tests/integration/framework.go
+++ b/libbeat/tests/integration/framework.go
@@ -33,6 +33,7 @@ import (
 	"os/exec"
 	"path/filepath"
 	"regexp"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -282,8 +283,6 @@ func (b *BeatProc) waitBeatToExit() {
 		b.t.Fatalf("error waiting for %q to finish: %s. Exit code: %s",
 			b.beatName, err, exitCode)
 	}
-
-	return
 }

 // Stop stops the Beat process
@@ -312,10 +311,10 @@ func (b *BeatProc) stopNonsynced() {
 	defer b.waitingMutex.Unlock()
 	ps, err := b.Process.Wait()
 	if err != nil {
-		b.t.Logf("[WARN] got an error waiting mockbeat to top: %v", err)
+		b.t.Logf("[WARN] got an error waiting for %s to stop: %v", b.beatName, err)
 	}
 	if !ps.Success() {
-		b.t.Logf("[WARN] mockbeat did not stopped successfully: %v", ps.String())
+		b.t.Logf("[WARN] %s did not stop successfully: %v", b.beatName, ps.String())
 	}
 }

@@ -430,6 +429,29 @@ func (b *BeatProc) GetLogLine(s string) string {
 	return line
 }

+// GetLastLogLine searches for the string s starting at the end of the logs.
+// If s is found, the whole log line is returned; otherwise an empty string is
+// returned. GetLastLogLine does not keep track of any offset.
+func (b *BeatProc) GetLastLogLine(s string) string {
+	logFile := b.openLogFile()
+	defer logFile.Close()
+
+	found, line := b.searchStrInLogsReversed(logFile, s)
+	if found {
+		return line
+	}
+
+	eventLogFile := b.openEventLogFile()
+	if eventLogFile == nil {
+		return ""
+	}
+	defer eventLogFile.Close()
+	_, line = b.searchStrInLogsReversed(eventLogFile, s)
+
+	return line
+}
+
 // searchStrInLogs search for s as a substring of any line in logFile starting
 // from offset.
 //
@@ -471,6 +493,44 @@ func (b *BeatProc) searchStrInLogs(logFile *os.File, s string, offset int64) (bo
 	return false, offset, ""
 }

+// searchStrInLogsReversed searches for s as a substring of any line in
+// logFile, scanning from the last line backwards.
+//
+// It will close logFile before returning.
+func (b *BeatProc) searchStrInLogsReversed(logFile *os.File, s string) (bool, string) {
+	t := b.t
+
+	defer func() {
+		if err := logFile.Close(); err != nil {
+			// That's not quite a test error, but it can impact
+			// next executions of LogContains, so treat it as an error
+			t.Errorf("could not close log file: %s", err)
+		}
+	}()
+
+	r := bufio.NewReader(logFile)
+	lines := []string{}
+	for {
+		line, err := r.ReadString('\n')
+		if err != nil {
+			if err != io.EOF {
+				t.Fatalf("error reading log file '%s': %s", logFile.Name(), err)
+			}
+			break
+		}
+		lines = append(lines, line)
+	}
+
+	slices.Reverse(lines)
+	for _, line := range lines {
+		if strings.Contains(line, s) {
+			return true, line
+		}
+	}
+
+	return false, ""
+}
+
 // WaitForLogs waits for the specified string s to be present in the logs within
 // the given timeout duration and fails the test if s is not found.
// msgAndArgs should be a format string and arguments that will be printed diff --git a/metricbeat/cmd/root.go b/metricbeat/cmd/root.go index e3d308d2508..497b71bed8a 100644 --- a/metricbeat/cmd/root.go +++ b/metricbeat/cmd/root.go @@ -24,6 +24,7 @@ import ( "github.com/elastic/elastic-agent-libs/mapstr" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/cmd" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/ecs" @@ -61,6 +62,7 @@ func MetricbeatSettings(moduleNameSpace string) instance.Settings { } var runFlags = pflag.NewFlagSet(Name, pflag.ExitOnError) runFlags.AddGoFlag(flag.CommandLine.Lookup("system.hostfs")) + cfgfile.AddAllowedBackwardsCompatibleFlag("system.hostfs") return instance.Settings{ RunFlags: runFlags, Name: Name, diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index ffada82ea92..53407f68020 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -5225,7 +5225,7 @@ Runtime CPU metrics. *`awsfargate.task_stats.cpu.kernel.pct`*:: + -- -Percentage of time in kernel space. +Percentage of time in kernel space, expressed as a value between 0 and 1. type: scaled_float @@ -5237,7 +5237,7 @@ format: percent *`awsfargate.task_stats.cpu.kernel.norm.pct`*:: + -- -Percentage of time in kernel space normalized by the number of CPU cores. +Percentage of time in kernel space normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -5259,7 +5259,7 @@ type: long *`awsfargate.task_stats.cpu.system.pct`*:: + -- -Percentage of total CPU time in the system. +Percentage of total CPU time in the system, expressed as a value between 0 and 1. type: scaled_float @@ -5271,7 +5271,7 @@ format: percent *`awsfargate.task_stats.cpu.system.norm.pct`*:: + -- -Percentage of total CPU time in the system normalized by the number of CPU cores. +Percentage of total CPU time in the system normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -5293,7 +5293,7 @@ type: long *`awsfargate.task_stats.cpu.user.pct`*:: + -- -Percentage of time in user space. +Percentage of time in user space, expressed as a value between 0 and 1. type: scaled_float @@ -5305,7 +5305,7 @@ format: percent *`awsfargate.task_stats.cpu.user.norm.pct`*:: + -- -Percentage of time in user space normalized by the number of CPU cores. +Percentage of time in user space normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -5327,7 +5327,7 @@ type: long *`awsfargate.task_stats.cpu.total.pct`*:: + -- -Total CPU usage. +Total CPU usage, expressed as a value between 0 and 1. type: scaled_float @@ -5339,7 +5339,7 @@ format: percent *`awsfargate.task_stats.cpu.total.norm.pct`*:: + -- -Total CPU usage normalized by the number of CPU cores. +Total CPU usage normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -5703,7 +5703,7 @@ format: bytes *`awsfargate.task_stats.memory.rss.pct`*:: + -- -Memory resident set size percentage. +Memory resident set size percentage, expressed as a value between 0 and 1. type: scaled_float @@ -12043,7 +12043,7 @@ type: double *`containerd.cpu.usage.total.pct`*:: + -- -Percentage of total CPU time normalized by the number of CPU cores +Percentage of total CPU time normalized by the number of CPU cores, expressed as a value between 0 and 1. 
type: scaled_float @@ -12055,7 +12055,7 @@ format: percent *`containerd.cpu.usage.kernel.pct`*:: + -- -Percentage of time in kernel space normalized by the number of CPU cores. +Percentage of time in kernel space normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -12067,7 +12067,7 @@ format: percent *`containerd.cpu.usage.user.pct`*:: + -- -Percentage of time in user space normalized by the number of CPU cores. +Percentage of time in user space normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -13633,7 +13633,7 @@ Runtime CPU metrics. *`docker.cpu.kernel.pct`*:: + -- -Percentage of time in kernel space. +Percentage of time in kernel space, expressed as a value between 0 and 1. type: scaled_float @@ -13645,7 +13645,7 @@ format: percent *`docker.cpu.kernel.norm.pct`*:: + -- -Percentage of time in kernel space normalized by the number of CPU cores. +Percentage of time in kernel space normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -13667,7 +13667,7 @@ type: long *`docker.cpu.system.pct`*:: + -- -Percentage of total CPU time in the system. +Percentage of total CPU time in the system, expressed as a value between 0 and 1. type: scaled_float @@ -13679,7 +13679,7 @@ format: percent *`docker.cpu.system.norm.pct`*:: + -- -Percentage of total CPU time in the system normalized by the number of CPU cores. +Percentage of total CPU time in the system normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -13701,7 +13701,7 @@ type: long *`docker.cpu.user.pct`*:: + -- -Percentage of time in user space. +Percentage of time in user space, expressed as a value between 0 and 1. type: scaled_float @@ -13713,7 +13713,7 @@ format: percent *`docker.cpu.user.norm.pct`*:: + -- -Percentage of time in user space normalized by the number of CPU cores. +Percentage of time in user space normalized by the number of CPU cores, expressed as a value between 0 and 1. type: scaled_float @@ -13759,7 +13759,7 @@ format: percent *`docker.cpu.core.*.pct`*:: + -- -Percentage of CPU time in this core. +Percentage of CPU time in this core, expressed as a value between 0 and 1. type: object @@ -13771,7 +13771,7 @@ format: percent *`docker.cpu.core.*.norm.pct`*:: + -- -Percentage of CPU time in this core, normalized by the number of CPU cores. +Percentage of CPU time in this core normalized by the number of CPU cores, expressed as a value between 0 and 1. type: object @@ -14432,7 +14432,7 @@ format: bytes *`docker.memory.rss.pct`*:: + -- -Memory resident set size percentage. +Memory resident set size percentage, expressed as a value between 0 and 1. type: scaled_float @@ -14463,7 +14463,7 @@ format: bytes *`docker.memory.usage.pct`*:: + -- -Memory usage percentage. +Memory usage percentage, expressed as a value between 0 and 1. type: scaled_float diff --git a/metricbeat/docs/modules/aws.asciidoc b/metricbeat/docs/modules/aws.asciidoc index 0ee7f601052..291e2b7c09b 100644 --- a/metricbeat/docs/modules/aws.asciidoc +++ b/metricbeat/docs/modules/aws.asciidoc @@ -146,6 +146,25 @@ Enforces the use of FIPS service endpoints. See < - Percentage of time in kernel space. + Percentage of time in kernel space, expressed as a value between 0 and 1. - name: kernel.norm.pct type: scaled_float format: percent description: > - Percentage of time in kernel space normalized by the number of CPU cores. 
+ Percentage of time in kernel space normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: kernel.ticks type: long description: > @@ -22,12 +22,12 @@ type: scaled_float format: percent description: > - Percentage of total CPU time in the system. + Percentage of total CPU time in the system, expressed as a value between 0 and 1. - name: system.norm.pct type: scaled_float format: percent description: > - Percentage of total CPU time in the system normalized by the number of CPU cores. + Percentage of total CPU time in the system normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: system.ticks type: long description: > @@ -36,12 +36,12 @@ type: scaled_float format: percent description: > - Percentage of time in user space. + Percentage of time in user space, expressed as a value between 0 and 1. - name: user.norm.pct type: scaled_float format: percent description: > - Percentage of time in user space normalized by the number of CPU cores. + Percentage of time in user space normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: user.ticks type: long description: > @@ -61,13 +61,13 @@ object_type: scaled_float format: percent description: > - Percentage of CPU time in this core. + Percentage of CPU time in this core, expressed as a value between 0 and 1. - name: core.*.norm.pct type: object object_type: scaled_float format: percent description: > - Percentage of CPU time in this core, normalized by the number of CPU cores. + Percentage of CPU time in this core normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: core.*.ticks type: object object_type: long diff --git a/metricbeat/module/docker/fields.go b/metricbeat/module/docker/fields.go index a313e328ed9..a18cbb5c6a9 100644 --- a/metricbeat/module/docker/fields.go +++ b/metricbeat/module/docker/fields.go @@ -32,5 +32,5 @@ func init() { // AssetDocker returns asset data. // This is the base64 encoded zlib format compressed contents of module/docker. 
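// For reference, a minimal sketch (standard library only, error handling
// elided) of how the embedded data can be recovered from this blob:
//
//	raw, _ := base64.StdEncoding.DecodeString(AssetDocker())
//	zr, _ := zlib.NewReader(bytes.NewReader(raw))
//	fieldsYml, _ := io.ReadAll(zr) // plain-text field definitions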
func AssetDocker() string { - return "eJzsnEtv4zgSgO/5FYXsYYFGx8Yu9pTDArNJLzrY6UnQneweBgMPTZVtrilSTVJ2u3/9gKQk60FJfsiOM2gfcrDsqo/14qucG1ji5hYiSZeorgAMMxxv4frevXF9BRChpoolhklxC/+8AgDwD0EbYjRQyTlSgxHMlIyzZ6MrAIUcicZbmJMrAI3GMDHXt/Drtdb8+jf73kIqM6FSzNj8FmaEa7wCmDHkkb51mm5AkBhLfPZlNomVqmSaZO8EGO3rQcykiol9G4iIHDDThlENZCpTk4n9qwaVCsHEHKgUhjCBSo8yKWWaMlHxyeJJCKwDrmTIQhbEaBSjhXL7qpoxf9WxqmhxTERUeZbDLXGzlqr+rAPRvu68QDALYmBNNOA3pKl1ORNgFtgYxyjMpZAYDHNFxOB+UPfEIKwX6Am2JrR8maYwho2CVA9pnVy1lxzWypIJiSKFWmNYN0sOVfvwBIXoliGz73XrhmN1v+Gy7xiKWGiJzzKRktJMZnVLbMG4FPPAwx4293qWhnAPJ2dAOHcBMmMcdR6vLYFaAVyfgu1LRrUlcjm1ICuEKaLIIxekArogYo4RaCYo+gdMirCDDZkPGNEPMZmjkzlq1r0kPabifU6FYTHC3dPLMMVuiUogHyXUBMevKeEYTWZckvoH/NxwCwkqiqL+tMdET/5L1k7WnXZITGQwoBNCMeyoDFdIFV8gM1guwtl3jGC6cVEq0niKyn7BuoxK1VZjspEZRpfhUAykTV+peXoBJ2832+qNNvjqZnXVx5N7A1srZmhd2JcQEh3sx4RGNsKhQyMDc2LDilON6rVtmlnSonQFr0O9hBho8B7jeTeqk5WEPpu6eD67PZ+LLEo1mXeivYq/a3zHuNc+Gr1rHYGc/h8bj/ybk3PGdLWiMe25u0bU6ZiLHtb7AfzZnrD9Q98/pX+p0BXJHXBUcRrA9JLJozbeTC/hYfw4zBpUIQnvag/YXv1EaRqn3G0CrFwNUaqYmDtHcjYrtg+hA4g20DKsTE6x69o68SDoLd50Yxob5F7APKnavrzDAP5lv+rgD2dXzUOMXvS9bEtTpVCYzMaJnf2QysZRT2nlhWrFKE5smTgBmZ9KXA0yMlcGD4+g8GuK2uj3NpMFEdJzNn2Tg64JM2egzLlAJ9aOVqt1NRPwNcUUtY2kfBw7s7uvNn0wFPi2entFxSCCtWitWCMIBylGTvCbq0aHUV9KObL0BsUbqEiZnX+UpB8lqQGj0zgmanO6FRIRb7U82SWoTFC5o903W6bcqil3wtuoVyWj/6hZP2pWBQRX1Y35ode6dTn7byyHvqr8YIlCUotbygFvjb0yFvlLWrIijJMpx6DemZLx4MOUqaJhdVb2cOqeF+i+bmPLn4UAxsy1O7iyV4+DLQehVuZpSDq1ynoJPmI2bgrrm1UbUdY37B048uE/3OezzW6uqBjGGMWmaddUGjwAg/oh2FGj+C9RTKbaChmvCE+xxFUd23uIMEER2dFJAczoamTn41og4WZBF0iXA5S1krTm8VnlCx9Ln4yIIbBmnIMUfANT3FYE3zgU1XpItK0bCu1wK0Kzz/3+8cNPPz9/vPv44e4/vwMT2qjUZRMsiPZ36anGyE6o05TxyJkt+y6La+fy+1fmGWGcibk2CskymEtMGJw3ljo9/qdS0NQtUKwCjKDutNPNDWVnedlAZRSun6E0OriCOGGZsfdtI0ERTQKtQ9DVV7QDUt0eKKKwpJIzlDkHiVPUzSJTk6ShEjVAbSqjtOgpXPONmUkjgsok4Qw5zCiNcC3mGpvrA1Q9J2eY64KWRdYBqWMnPA/GycaWTBahMGzGmp1NfZmUbYxOEzbwItjXNGfdQsKcrWyhTrLZK9zkVMZMyAkpH7Zg2TzrgN8DmwEzNqLdXsxNV+sFowu/q822lH5wEVNIDd84hSjqJe2EvZCuQ9PuzoqmSE/U3xA5YHegbx1zzXc+JPeNwxVTJiX8BNvASvNdYwlQpVA4TzkJlaaB2xMtC+EcKKELjDyWBqK1pMydcBnZDLKW5VYOz8kU+aH3t0c0DHq9PXDn61RkYnbUHfGDmMm84MOU2MWkXV0ak+jb8TiSVI/8enJEZTxGMWcCxwpnqFBQHJOEjf3zicJYGpyQhE1Wfxv9/R/jv4wjphNONje+h+lmzSK8Ydt29WMbwPM19FBp/bhC5cK00uu8d3InxK7JT5BV9SMeryjQzt9kylr/zwDV/iODJpU2MknOYqpM005UrmvpDExupu0y1SnOq7I1Sr7LldoEl1NhDle3gyz7N8S0WsNraVa6GGNZuV/Zu9Z9chKGWd76yvDuwPknYK7S80lMkoSJefbh63fX+5n2M1ln1sp+veTWcm6CddbS2dORfeo2KGpGWg4RqYxjNtgu+M5JM65pyx30CPgfE5Fc16Oqr8YelKMDXAX5qA0LKOp/87DkHGhPSJaZu3oNXKAqtiIGJ2upljbgNJpRyLSt7F3cPcyZbsh0g0bTyzsjjI+oTFvOZTo6Entg/k2YnfdTmwvhIsxZWx4Ma5asSDl1YRKlB1vxfP7ypVIp9l3qvG4aZuQKtZvCbAS5LUfHxjp4qt0bPNDb0roj96cW4lxqaye1a2Ieyusv2p/yHO73mHx7Ba9/It9y6kDTOVyen33reZtv4dISqWbUxgpMoLHF+pgl2C9exOFrsPDCNLiEOWqdnIMWop2qlvV5+HLzkIMlQWVsJ8PM1Nn6bdu8sW+ivlbLTDEQvwzZDqBj3xWp7t1gT+LuQHXvNQDL6RJCl9hp4NJRv1KycdYAA+wLPzjB7m7zEK7ssycAe9gVp3QpM1QmPKZmLv8MmVAMpJ4Jl5AIMoe7sEQ4iOt0ifC4K852RpjKtOWfUww3LfjfzFf/MYS7Oq3fgVxudtRPgVhl3jg8RYY7pYta5oxXyA+Phq3TxWukRqsDg0yliWLQBGmZLf6ECSIr08klJUi9Zl9AguyOdL4EaWeqbbMmzV8UhPOj2DhN0ZDdtmHtKjpbOpJ9Drz75rSn/LwtfNRn6JDanu961KXRkOpe7vvVTTgzOLDOn5nBbsWMxoN68e5Tz0jtX//fC5KWe6wD7o4W6P63EjxtG2GLCv8eqFQKdSJ9z6iRME6UpONfExb9NhYY7hfecg6KuQXMp6ZCkW/dzH69vjO+diP4IwAA//+0neAX" + return 
"eJzsXE1v4zjSvudXFPIeXqDRbe8s9pTDArNJLzrY6UnQneweBgMPTZXtWkukmqTsuH/9gqQkWxJl+UN2nEH7kENoVT1VrC8WS/4Ac1zdQCT5HNUVgCET4w1c37l/XF8BRKi5otSQFDfw9ysAAL8I2jCjgcs4Rm4wgomSSb42uAJQGCPTeANTdgWg0RgSU30Dv11rHV//bv83k8qMuBQTmt7AhMUarwAmhHGkbxynDyBYghv47MesUktVySzN/xPAaD/3YiJVwuy/gYnIASZtiGtgY5mZnOz/a1CZECSmwKUwjAQqPcipbKLZRFR+s1wJAdsCbkORJS1I0CjiJXP7qaqx+NRhVaElCRNRZa0AN8fVUqr62haI9nPrCYKZMQNLpgFfkGd2y0mAmWFDjkEYl0JmMIwrYgb3A3XHDMJyhh7BWoUWX84pDMNaQab71E7B2lMOc6V0xKJIodYY5k3poWzvH6Ek3SIyfa9rN2yr+4lL3zFksdBin5uIlJRmNKlrYg0slmIaWOzA5j5P0rDYg5MTYHHsDGRCMerCXlsMtQJweQpsX3NUa0TOp2ZsgTBGFIXlglTAZ0xMMQJNgqNfICnCG2zYtEeLvk/YFB3NQTPupdkxEe9LJgwlCLePz/0EuzkqgfEg5SYov+Ysxmg0iSWrf8HnhhtIUXEU9dUOFT36h6ye7HZakUjkYECnjON7wJfUOWUETAODBYszu8tmaTf6Ly4j/RTezlwoIVVygZKBxcVi+o4RjFfOlkWWjFHZB+zGcqlQ9yC/IT4Pm3XABbvC1uMzOHp1aVoi5kobfHXlu0jmkfttsLr20I5Rby7cJZjXFglPb2a5Hvo2sxy+IxtmnGlUr635XN8WyvEBywl0CfbUkOr0VuRkP1moWkvSkvmtB51d60+l32aaTbdCexWrqOHbzQhaTi1S4eBdqwRy/F9sLPl/js5p+dUYStrhPsasc7m3bt9FC396z89V1O773frZPzr8WpGhjBOl2M2SPSI9J3lUn4L0HO6HD/2U7ApZuAlwwGn0Z86zJIvdmcnS1RBlisTUbXdMk/K0FerXtAHdBCvTUxxS15t4EOg1vPHKNPoJnQALz2t7eAcB/mEfdeAPx66aPZ9O6HvplmdKoTC5jlObSJHLRmdsoyBEtSCOIxtLToDMZyUXqIwsmMH9Ayj8lqE2+r31ZMGE9Dibe1MAXTIyZ0BZ4AKdWj1arnarScC3DDPU1pIKOXbG7h5t7kFfwNcx3jMqhQjGoqWihhH2Eowc4TcXjQ5DfSnhyKI3KN5ARMr1/CMk/QhJDTA6SxKmVqerkGxF+zbDky1BZYrKdcLfbJhyVVOxCW8jXm0o/UfM+hGzKkBwUT29H3oLXqez/8Gy75vdjxZRiGp5qdvjJbtnRpG/02YLRjEbxxjkO1Ey6V1MmSkeZmdp98fuaYbucWtbvhcCmJCbDnFhr24HaxyMW5qnQbKVq6yH4COycZNYV1ZtWFmX2DvgKMS/vyuyzW5bUVGMMYrG2bZUGmyAQb0JdpQU/2aKZKYtkaHv3a1xVWV7DxGmKCIrnRRARlctu5Brhiw2Mz5DPu8hrG1Qa7bPKg982vhmxAyDJcUxSBGvYIzriODnrKLayI22cUOhFbdCNP/eH58+/vzL06fbTx9v//UHkNBGZc6bYMa0Hz3INEY2oY4ziiOntvxZSmot/v0j84RRTGKqjUI2D/oSCYPTRqnTsf9cCp65AsUywAjqm3a63LC5WZ42cBmF42fIjQ6OII5Yrux9p25QRKPApBVsG8PaAVJdHyiiMKWNzVDmHEgco+1YZGbSLBSieohNm1Ba+JRb80Jm1LCgTSRhDzlMKQ1zLXON9fUeop6j0891QUuRdYDr2ITngcVsZUMmRSgMTag5CNblSfnB6DRmA8+CvmUF1jVImNLCBuo0z17hmbBNmCk7Icr7NbA8zzrA74EmQMZatDuLuXS1nBGf+VNtfqT0wkWkkJt45RiiqIe0E46OuoFWezorZ0g9ou750R6HKf2knZtV9Ca5rx0uSJmMxSc4BlZmFRslQBWFwmkWs1Bo6nma02JhcQyc8RlGHpYGprXk5DpcRjaNrKXcKsDHbIzxofe3R8xXer4d4M432ElictQd8b2YyCLgw5jZYtJWl8ak+mY4jCTXA19PDrhMhiimJHCocIIKBcchS2no10cKE2lwxFIaLX4a/PVvw/8bRqTTmK0++NGqD0uK8AOtp/uPnZcvaui+3PphgcqZaWU0fG/nTpmtyU/gVfUWj2cUePuhiSl/U+IMoNrfyWii0kam6VlUlXPaCZUbgDoDJpdpt6nqFP2qvEYpTrlSm2A5Fcbh4nYQy/4DMa3a8FyakS7BRFbuV/aOdZ8dhX7KWx8Z3h2YfwLq2lgfJSxNSUzzL1+/u95PtV/YMtdW/rKXq+VcgnXa0vnqwK66A4qasJYmIpdJQr2dgm8dNeNGu1yjR8B/SERyWbeqrhh7kI/2cBXkrTZMoIz/zWbJOaA9Ipvn29Wp4BKqogUzOFpKNbcGp9EMQqptxb4NdwfmnDfkvEGj6cQ7YRQPuMxa+jJbxhY7wPyTkc37mfWFcBCOqc0P+lVLHqQcuzASpXureL58/VqJFPuWOq/rhjlyhdqlMGtB7six5WAd7Gp3Gg90zr3uiPtzC+KCqjuHHzPTXm0DwTG28ax9L+hw60jYyyvYxmf2UqAOTLnD5VmDn3U/zgLg0pyypvpGNSfQ2MB/TDn3qydxeD0XLnKD5dBRNXcBtCTtWLXU+uGL0kOaVILLxCbWXNV5LbgeBNnXnV9r/KYUxJc0awG2nOEitf1k2eHeO6C68xyACnQp43PcquCNawOlZKNvAT2cMT86wu6e9BBc+XdPAOx+VzgbFzx9ecJDZqbyz+AJpSB1T7gER5AFuAtzhINwnc4RHnaFs84IY5m1/C5If2nB/1xB9Tc53DVs/T7lcr2j3lGiSt443EX66/hFLTnjFfzDQ8PWdPEartG6gUFMG4miVwdpyRZ/QgeRlXRySQ5Sj9kX4CC7Qzqfg7Rjqh2zRs23E8L+UR6cxmjYbsewdhZbx0PSfZrnXTntsejdhduGhvfJ7em2g10W9cnu+a6b3Sgmgz3z/IUMbmdMPOl1F28/d0hq//ofVUhb7sQOuIeaoftZK3hcD9WWEf49cKkU6lT6+VMjYZgqyYe/pRT9PhQYnj1e4+wV5hpgkZpKRn4MNH9ffmf42knwvwAAAP//GRdgEA==" } diff --git a/metricbeat/module/docker/memory/_meta/data.json b/metricbeat/module/docker/memory/_meta/data.json index 7cf34c7736b..8d2f9139a56 100644 --- a/metricbeat/module/docker/memory/_meta/data.json +++ b/metricbeat/module/docker/memory/_meta/data.json @@ -1,83 +1,88 @@ { "@timestamp": 
"2017-10-12T08:05:34.853Z", "container": { - "id": "23ce6f1b53181ea3db0611fe4de36f0ebf1c0a37cb8272e028cac06240dafbe0", + "id": "3ebfd3aebc686af21efccc552aabceb4303b70ef4bc2be7fffbb616000f824b4", "image": { - "name": "docker.elastic.co/beats/elastic-agent:7.15.0-SNAPSHOT" + "name": "docker.elastic.co/elastic-agent/elastic-agent-complete:8.15.3" }, - "name": "elastic-package-stack_elastic-agent_1", + "memory": { + "usage": 0.0012028823743718271 + }, + "name": "elastic-package-stack-elastic-agent-1", "runtime": "docker" }, "docker": { "container": { "labels": { - "com_docker_compose_config-hash": "8e3d03827946685d53a2f171a126c397a3278da18ecd68a970cba9131160c52c", + "com_docker_compose_config-hash": "34d3699c997ecee19f466a3a5be2c73b86a5f4a89c362301412d9cc03e41d62d", "com_docker_compose_container-number": "1", + "com_docker_compose_depends_on": "fleet-server:service_healthy:false", + "com_docker_compose_image": "sha256:37bd3034b35d10da7c806226eb2956b6c998745da9dc15ed3e920d214a59bcec", "com_docker_compose_oneoff": "False", "com_docker_compose_project": "elastic-package-stack", + "com_docker_compose_project_config_files": "/home/alexk/.elastic-package/profiles/default/stack/docker-compose.yml", + "com_docker_compose_project_working_dir": "/home/alexk/.elastic-package/profiles/default/stack", "com_docker_compose_service": "elastic-agent", - "com_docker_compose_version": "1.28.6", - "description": "Agent manages other beats based on configuration provided.", - "io_k8s_description": "Agent manages other beats based on configuration provided.", + "com_docker_compose_version": "2.29.2", + "description": "Elastic Agent - single, unified way to add monitoring for logs, metrics, and other types of data to a host.", + "io_k8s_description": "Elastic Agent - single, unified way to add monitoring for logs, metrics, and other types of data to a host.", "io_k8s_display-name": "Elastic-Agent image", "license": "Elastic License", "maintainer": "infra@elastic.co", "name": "elastic-agent", - "org_label-schema_build-date": "2021-07-28T09:55:40Z", + "org_label-schema_build-date": "2024-10-10T10:08:37Z", "org_label-schema_license": "Elastic License", "org_label-schema_name": "elastic-agent", "org_label-schema_schema-version": "1.0", - "org_label-schema_url": "https://www.elastic.co/beats/elastic-agent", - "org_label-schema_vcs-ref": "16108a69f9f437c00cb6125c57bbc01c4eb805bb", - "org_label-schema_vcs-url": "github.com/elastic/beats/v7", + "org_label-schema_url": "https://www.elastic.co/elastic-agent", + "org_label-schema_vcs-ref": "61975895b1409449db21ddca0405e7b71bfc1c46", + "org_label-schema_vcs-url": "github.com/elastic/elastic-agent", "org_label-schema_vendor": "Elastic", - "org_label-schema_version": "7.15.0-SNAPSHOT", - "org_opencontainers_image_created": "2021-07-28T09:55:40Z", + "org_label-schema_version": "8.15.3", + "org_opencontainers_image_created": "2024-10-10T10:08:37Z", "org_opencontainers_image_licenses": "Elastic License", + "org_opencontainers_image_ref_name": "ubuntu", "org_opencontainers_image_title": "Elastic-Agent", "org_opencontainers_image_vendor": "Elastic", + "org_opencontainers_image_version": "20.04", "release": "1", "summary": "elastic-agent", - "url": "https://www.elastic.co/beats/elastic-agent", + "url": "https://www.elastic.co/elastic-agent", "vendor": "Elastic", - "version": "7.15.0-SNAPSHOT" + "version": "8.15.3" } }, "memory": { "fail": { "count": 0 }, - "limit": 67514433536, - "rss": { - "pct": 0, - "total": 0 - }, + "limit": 33537363968, "stats": { - "active_anon": 270336, - 
"active_file": 135168, - "anon": 246484992, - "anon_thp": 4194304, - "file": 325484544, + "active_anon": 10223616, + "active_file": 19795968, + "anon": 19140608, + "anon_thp": 0, + "file": 36466688, "file_dirty": 0, - "file_mapped": 170582016, + "file_mapped": 585728, "file_writeback": 0, - "inactive_anon": 250257408, - "inactive_file": 325619712, - "kernel_stack": 2703360, - "pgactivate": 62898, + "inactive_anon": 8921088, + "inactive_file": 16670720, + "kernel_stack": 212992, + "pgactivate": 4810, "pgdeactivate": 0, - "pgfault": 2150971515, - "pglazyfree": 207999, + "pgfault": 703936010, + "pglazyfree": 0, "pglazyfreed": 0, - "pgmajfault": 0, - "pgrefill": 0, - "pgscan": 0, - "pgsteal": 0, + "pgmajfault": 729, + "pgrefill": 170270, + "pgscan": 12002, + "pgsteal": 10536, "shmem": 0, - "slab": 8112800, - "slab_reclaimable": 5753632, - "slab_unreclaimable": 2359168, - "sock": 200704, + "slab": 513312, + "slab_reclaimable": 194680, + "slab_unreclaimable": 318632, + "sock": 0, "thp_collapse_alloc": 0, "thp_fault_alloc": 0, "unevictable": 0, @@ -87,8 +92,8 @@ }, "usage": { "max": 0, - "pct": 0.0039415723433138695, - "total": 266113024 + "pct": 0.0012028823743718271, + "total": 40341504 } } }, diff --git a/metricbeat/module/docker/memory/_meta/fields.yml b/metricbeat/module/docker/memory/_meta/fields.yml index 0ebb64ba975..62bc1681fda 100644 --- a/metricbeat/module/docker/memory/_meta/fields.yml +++ b/metricbeat/module/docker/memory/_meta/fields.yml @@ -53,7 +53,7 @@ type: scaled_float format: percent description: > - Memory resident set size percentage. + Memory resident set size percentage, expressed as a value between 0 and 1. - name: usage type: group description: > @@ -68,7 +68,7 @@ type: scaled_float format: percent description: > - Memory usage percentage. + Memory usage percentage, expressed as a value between 0 and 1. 
- name: total type: long format: bytes diff --git a/metricbeat/module/docker/memory/data.go b/metricbeat/module/docker/memory/data.go index 31b943449a6..48afe7d6ed6 100644 --- a/metricbeat/module/docker/memory/data.go +++ b/metricbeat/module/docker/memory/data.go @@ -51,16 +51,21 @@ func eventMapping(r mb.ReporterV2, memoryData *MemoryData) { "count": memoryData.Failcnt, }, "limit": memoryData.Limit, - "rss": mapstr.M{ - "total": memoryData.TotalRss, - "pct": memoryData.TotalRssP, - }, "usage": mapstr.M{ "total": memoryData.Usage, "pct": memoryData.UsageP, "max": memoryData.MaxUsage, }, } + if memoryData.TotalRss.Exists() { + fields["rss"] = mapstr.M{ + "total": memoryData.TotalRss.ValueOr(0), + } + if memoryData.TotalRssP.Exists() { + fields.Put("rss.pct", memoryData.TotalRssP.ValueOr(0)) + } + } + // Add container ECS fields _, _ = rootFields.Put("container.memory.usage", memoryData.UsageP) } diff --git a/metricbeat/module/docker/memory/helper.go b/metricbeat/module/docker/memory/helper.go index ad74726dc92..7f7f8b917fd 100644 --- a/metricbeat/module/docker/memory/helper.go +++ b/metricbeat/module/docker/memory/helper.go @@ -20,6 +20,7 @@ package memory import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/metricbeat/module/docker" + "github.com/elastic/elastic-agent-libs/opt" ) // MemoryData contains parsed container memory info @@ -29,8 +30,8 @@ type MemoryData struct { Failcnt uint64 Limit uint64 MaxUsage uint64 - TotalRss uint64 - TotalRssP float64 + TotalRss opt.Uint + TotalRssP opt.Float Usage uint64 UsageP float64 //Raw stats from the cgroup subsystem @@ -61,7 +62,7 @@ func (s *MemoryService) getMemoryStatsList(containers []docker.Stat, dedot bool) } func (s *MemoryService) getMemoryStats(myRawStat docker.Stat, dedot bool) MemoryData { - totalRSS := myRawStat.Stats.MemoryStats.Stats["total_rss"] + totalRSS, rssOK := myRawStat.Stats.MemoryStats.Stats["total_rss"] // Emulate newer docker releases and exclude cache values from memory usage // See here for a little more context. usage - cache won't work, as it includes shared mappings that can't be dropped @@ -79,14 +80,12 @@ func (s *MemoryService) getMemoryStats(myRawStat docker.Stat, dedot bool) Memory } memUsage = myRawStat.Stats.MemoryStats.Usage - fileUsage - return MemoryData{ + memData := MemoryData{ Time: common.Time(myRawStat.Stats.Read), Container: docker.NewContainer(myRawStat.Container, dedot), Failcnt: myRawStat.Stats.MemoryStats.Failcnt, Limit: myRawStat.Stats.MemoryStats.Limit, MaxUsage: myRawStat.Stats.MemoryStats.MaxUsage, - TotalRss: totalRSS, - TotalRssP: float64(totalRSS) / float64(myRawStat.Stats.MemoryStats.Limit), Usage: memUsage, UsageP: float64(memUsage) / float64(myRawStat.Stats.MemoryStats.Limit), Stats: myRawStat.Stats.MemoryStats.Stats, @@ -95,4 +94,13 @@ func (s *MemoryService) getMemoryStats(myRawStat docker.Stat, dedot bool) Memory CommitPeak: myRawStat.Stats.MemoryStats.CommitPeak, PrivateWorkingSet: myRawStat.Stats.MemoryStats.PrivateWorkingSet, } + // the RSS metrics are cgv1 only. 
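+	// (cgroups v2 hosts do not report "total_rss", so rssOK is false there,
+	// TotalRss/TotalRssP stay unset, and eventMapping omits the rss fields
+	// from the event entirely.)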
+	if rssOK {
+		memData.TotalRss = opt.UintWith(totalRSS)
+		if myRawStat.Stats.MemoryStats.Limit != 0 {
+			memData.TotalRssP = opt.FloatWith(float64(totalRSS) / float64(myRawStat.Stats.MemoryStats.Limit))
+		}
+	}
+	return memData
 }
diff --git a/metricbeat/module/docker/memory/memory_test.go b/metricbeat/module/docker/memory/memory_test.go
index 93e11824e28..d05d0a7a61a 100644
--- a/metricbeat/module/docker/memory/memory_test.go
+++ b/metricbeat/module/docker/memory/memory_test.go
@@ -22,43 +22,68 @@ import (
 	"time"

 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"

 	mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing"
 	"github.com/elastic/beats/v7/metricbeat/module/docker"
 	"github.com/elastic/elastic-agent-libs/mapstr"
 )

+var defaultContainerID = "containerID"
+
+var defaultLabels = map[string]string{
+	"label1":     "val1",
+	"label2":     "val2",
+	"label2.foo": "val3",
+}
+
+var defaultContainerStats = types.Container{
+	ID:         defaultContainerID,
+	Image:      "image",
+	Command:    "command",
+	Created:    123789,
+	Status:     "Up",
+	SizeRw:     123,
+	SizeRootFs: 456,
+	Names:      []string{"/name1", "name1/fake"},
+	Labels:     defaultLabels,
+}
+
+func TestMemStatsV2(t *testing.T) {
+	// Test to make sure we don't report any RSS metrics where they don't exist
+	memoryService := &MemoryService{}
+	memorystats := getMemoryStats(time.Now(), 1, false)
+
+	memoryRawStats := docker.Stat{}
+	memoryRawStats.Container = &defaultContainerStats
+	memoryRawStats.Stats = memorystats
+
+	rawStats := memoryService.getMemoryStats(memoryRawStats, false)
+	require.False(t, rawStats.TotalRss.Exists())
+	require.False(t, rawStats.TotalRssP.Exists())
+
+	r := &mbtest.CapturingReporterV2{}
+	eventMapping(r, &rawStats)
+	events := r.GetEvents()
+	require.NotContains(t, events[0].MetricSetFields, "rss")
+}
+
 func TestMemoryService_GetMemoryStats(t *testing.T) {
-	//Container + dockerstats
-	containerID := "containerID"
-	labels := map[string]string{
-		"label1":     "val1",
-		"label2":     "val2",
-		"label2.foo": "val3",
-	}
-	container := types.Container{
-		ID:         containerID,
-		Image:      "image",
-		Command:    "command",
-		Created:    123789,
-		Status:     "Up",
-		SizeRw:     123,
-		SizeRootFs: 456,
-		Names:      []string{"/name1", "name1/fake"},
-		Labels:     labels,
-	}
+	memoryService := &MemoryService{}

-	memorystats := getMemoryStats(time.Now(), 1)
+	memorystats := getMemoryStats(time.Now(), 1, true)

 	memoryRawStats := docker.Stat{}
-	memoryRawStats.Container = &container
+	memoryRawStats.Container = &defaultContainerStats
 	memoryRawStats.Stats = memorystats

 	totalRSS := memorystats.MemoryStats.Stats["total_rss"]
 	expectedRootFields := mapstr.M{
 		"container": mapstr.M{
-			"id": containerID,
+			"id": defaultContainerID,
 			"name": "name1",
 			"image": mapstr.M{
 				"name": "image",
@@ -113,30 +138,30 @@ func TestMemoryService_GetMemoryStats(t *testing.T) {

 func TestMemoryServiceBadData(t *testing.T) {

-	badMemStats := types.StatsJSON{
-		Stats: types.Stats{
+	badMemStats := container.StatsResponse{
+		Stats: container.Stats{
 			Read: time.Now(),
-			MemoryStats: types.MemoryStats{}, //Test for cases where this is empty
+			MemoryStats: container.MemoryStats{}, //Test for cases where this is empty
 		},
 	}

 	memoryService := &MemoryService{}
-	memoryRawStats := []docker.Stat{docker.Stat{Stats: badMemStats}}
+	memoryRawStats := []docker.Stat{{Stats: badMemStats}}
 	rawStats := memoryService.getMemoryStatsList(memoryRawStats, false)
 	assert.Len(t, rawStats, 0)
 }

 func TestMemoryMath(t
*testing.T) { - memStats := types.StatsJSON{ - Stats: types.Stats{ + memStats := container.StatsResponse{ + Stats: container.Stats{ Read: time.Now(), - PreCPUStats: types.CPUStats{ - CPUUsage: types.CPUUsage{ + PreCPUStats: container.CPUStats{ + CPUUsage: container.CPUUsage{ TotalUsage: 200, }, }, - MemoryStats: types.MemoryStats{ + MemoryStats: container.MemoryStats{ Limit: 5, Usage: 5000, Stats: map[string]uint64{ @@ -149,18 +174,18 @@ func TestMemoryMath(t *testing.T) { memoryService := &MemoryService{} memoryRawStats := []docker.Stat{ - docker.Stat{Stats: memStats, Container: &types.Container{Names: []string{"test-container"}, Labels: map[string]string{}}}, + {Stats: memStats, Container: &types.Container{Names: []string{"test-container"}, Labels: map[string]string{}}}, } rawStats := memoryService.getMemoryStatsList(memoryRawStats, false) assert.Equal(t, float64(800), rawStats[0].UsageP) // 5000-900 /5 } -func getMemoryStats(read time.Time, number uint64) types.StatsJSON { +func getMemoryStats(read time.Time, number uint64, rssExists bool) container.StatsResponse { - myMemoryStats := types.StatsJSON{ - Stats: types.Stats{ + myMemoryStats := container.StatsResponse{ + Stats: container.Stats{ Read: read, - MemoryStats: types.MemoryStats{ + MemoryStats: container.MemoryStats{ MaxUsage: number, Usage: number * 2, Failcnt: number * 3, @@ -170,7 +195,9 @@ func getMemoryStats(read time.Time, number uint64) types.StatsJSON { }, } - myMemoryStats.MemoryStats.Stats["total_rss"] = number * 5 + if rssExists { + myMemoryStats.MemoryStats.Stats["total_rss"] = number * 5 + } return myMemoryStats } diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 5844c555c88..e9070e760c8 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -33,7 +33,7 @@ import ( k8sclientmeta "k8s.io/client-go/metadata" "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/api/resource" + k8sresource "k8s.io/apimachinery/pkg/api/resource" "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" @@ -101,7 +101,8 @@ type metaWatcher struct { metricsetsUsing []string // list of metricsets using this shared watcher(e.g. pod, container, state_pod) - enrichers map[string]*enricher // map of enrichers using this watcher. The key is the metricset name. Each metricset has its own enricher + enrichers map[string]*enricher // map of enrichers using this watcher. The key is the metricset name. Each metricset has its own enricher + metricsRepo *MetricsRepo // used to update container metrics derived from metadata, like resource limits nodeScope bool // whether this watcher should watch for resources in current node or in whole cluster restartWatcher kubernetes.Watcher // whether this watcher needs a restart. 
Only relevant in leader nodes due to metricsets with different nodescope(pod, state_pod)
@@ -311,6 +312,7 @@ func createWatcher(
 	client k8sclient.Interface,
 	metadataClient k8sclientmeta.Interface,
 	resourceWatchers *Watchers,
+	metricsRepo *MetricsRepo,
 	namespace string,
 	extraWatcher bool) (bool, error) {
@@ -388,6 +390,7 @@ func createWatcher(
 		watcher:         watcher,
 		started:         false, // not started yet
 		enrichers:       make(map[string]*enricher),
+		metricsRepo:     metricsRepo,
 		metricsetsUsing: make([]string, 0),
 		restartWatcher:  nil,
 		nodeScope:       nodeScope,
@@ -395,15 +398,65 @@ func createWatcher(
 	resourceWatchers.metaWatchersMap[resourceName] = resourceMetaWatcher

 	// Add event handlers to the watcher. The only action we need to do here is invalidate the enricher cache.
-	addEventHandlerToWatcher(resourceMetaWatcher, resourceWatchers)
+	addEventHandlersToWatcher(resourceMetaWatcher, resourceWatchers)

 	return true, nil
 }

-// addEventHandlerToWatcher adds an event handler to the watcher that invalidates the cache of enrichers attached
-// to the watcher.
-func addEventHandlerToWatcher(metaWatcher *metaWatcher, resourceWatchers *Watchers) {
-	notifyFunc := func(obj interface{}) {
+// addEventHandlersToWatcher adds event handlers to the watcher that invalidate the cache of enrichers
+// attached to the watcher and update container and node metrics on Pod and Node change events.
+func addEventHandlersToWatcher(
+	metaWatcher *metaWatcher,
+	resourceWatchers *Watchers,
+) {
+	containerMetricsUpdateFunc := func(pod *kubernetes.Pod) {
+		nodeStore, _ := metaWatcher.metricsRepo.AddNodeStore(pod.Spec.NodeName)
+		podId := NewPodId(pod.Namespace, pod.Name)
+		podStore, _ := nodeStore.AddPodStore(podId)
+
+		for _, container := range append(pod.Spec.Containers, pod.Spec.InitContainers...) {
+			metrics := NewContainerMetrics()
+
+			if cpu, ok := container.Resources.Limits["cpu"]; ok {
+				if q, err := k8sresource.ParseQuantity(cpu.String()); err == nil {
+					metrics.CoresLimit = NewFloat64Metric(float64(q.MilliValue()) / 1000)
+				}
+			}
+			if memory, ok := container.Resources.Limits["memory"]; ok {
+				if q, err := k8sresource.ParseQuantity(memory.String()); err == nil {
+					metrics.MemoryLimit = NewFloat64Metric(float64(q.Value()))
+				}
+			}
+
+			containerStore, _ := podStore.AddContainerStore(container.Name)
+			containerStore.SetContainerMetrics(metrics)
+		}
+	}
+
+	containerMetricsDeleteFunc := func(pod *kubernetes.Pod) {
+		podId := NewPodId(pod.Namespace, pod.Name)
+		nodeStore := metaWatcher.metricsRepo.GetNodeStore(pod.Spec.NodeName)
+		nodeStore.DeletePodStore(podId)
+	}
+
+	nodeMetricsUpdateFunc := func(node *kubernetes.Node) {
+		nodeName := node.GetObjectMeta().GetName()
+		metrics := NewNodeMetrics()
+		if cpu, ok := node.Status.Capacity["cpu"]; ok {
+			if q, err := k8sresource.ParseQuantity(cpu.String()); err == nil {
+				metrics.CoresAllocatable = NewFloat64Metric(float64(q.MilliValue()) / 1000)
+			}
+		}
+		if memory, ok := node.Status.Capacity["memory"]; ok {
+			if q, err := k8sresource.ParseQuantity(memory.String()); err == nil {
+				metrics.MemoryAllocatable = NewFloat64Metric(float64(q.Value()))
+			}
+		}
+		nodeStore, _ := metaWatcher.metricsRepo.AddNodeStore(nodeName)
+		nodeStore.SetNodeMetrics(metrics)
+	}
+
+	clearMetadataCacheFunc := func(obj interface{}) {
 		enrichers := make(map[string]*enricher, len(metaWatcher.enrichers))

 		resourceWatchers.lock.Lock()
@@ -420,10 +473,35 @@ func addEventHandlerToWatcher(metaWatcher *metaWatcher, resourceWatchers *Watche
 			enricher.Unlock()
 		}
 	}
+
 	metaWatcher.watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{
-
AddFunc: func(obj interface{}) {}, // do nothing - UpdateFunc: notifyFunc, - DeleteFunc: notifyFunc, + AddFunc: func(obj interface{}) { + switch res := obj.(type) { + case *kubernetes.Pod: + containerMetricsUpdateFunc(res) + case *kubernetes.Node: + nodeMetricsUpdateFunc(res) + } + }, + UpdateFunc: func(obj interface{}) { + clearMetadataCacheFunc(obj) + switch res := obj.(type) { + case *kubernetes.Pod: + containerMetricsUpdateFunc(res) + case *kubernetes.Node: + nodeMetricsUpdateFunc(res) + } + }, + DeleteFunc: func(obj interface{}) { + clearMetadataCacheFunc(obj) + switch res := obj.(type) { + case *kubernetes.Pod: + containerMetricsDeleteFunc(res) + case *kubernetes.Node: + nodeName := res.GetObjectMeta().GetName() + metaWatcher.metricsRepo.DeleteNodeStore(nodeName) + } + }, }) } @@ -481,6 +559,7 @@ func createAllWatchers( config *kubernetesConfig, log *logp.Logger, resourceWatchers *Watchers, + metricsRepo *MetricsRepo, ) error { res := getResource(resourceName) if res == nil { @@ -494,7 +573,7 @@ func createAllWatchers( // Create the main watcher for the given resource. // For example pod metricset's main watcher will be pod watcher. // If it fails, we return an error, so we can stop the extra watchers from creating. - created, err := createWatcher(resourceName, res, *options, client, metadataClient, resourceWatchers, config.Namespace, false) + created, err := createWatcher(resourceName, res, *options, client, metadataClient, resourceWatchers, metricsRepo, config.Namespace, false) if err != nil { return fmt.Errorf("error initializing Kubernetes watcher %s, required by %s: %w", resourceName, metricsetName, err) } else if created { @@ -509,7 +588,7 @@ func createAllWatchers( for _, extra := range extraWatchers { extraRes := getResource(extra) if extraRes != nil { - created, err = createWatcher(extra, extraRes, *options, client, metadataClient, resourceWatchers, config.Namespace, true) + created, err = createWatcher(extra, extraRes, *options, client, metadataClient, resourceWatchers, metricsRepo, config.Namespace, true) if err != nil { log.Errorf("Error initializing Kubernetes watcher %s, required by %s: %s", extra, metricsetName, err) } else { @@ -654,7 +733,7 @@ func NewResourceMetadataEnricher( metricsetName := base.Name() resourceName := getResourceName(metricsetName) // Create all watchers needed for this metricset - err = createAllWatchers(client, metadataClient, metricsetName, resourceName, nodeScope, config, log, resourceWatchers) + err = createAllWatchers(client, metadataClient, metricsetName, resourceName, nodeScope, config, log, resourceWatchers, metricsRepo) if err != nil { log.Errorf("Error starting the watchers: %s", err) return &nilEnricher{} @@ -689,20 +768,13 @@ func NewResourceMetadataEnricher( // It is responsible for generating the metadata for a detected resource by executing the metadata generators Generate method. // It is a common handler for all resource watchers. The kind of resource(e.g. pod or deployment) is checked inside the function. // It returns a map of a resource identifier(i.e. namespace-resource_name) as key and the metadata as value. - updateFunc := getEventMetadataFunc(log, generalMetaGen, specificMetaGen, metricsRepo) + updateFunc := getEventMetadataFunc(log, generalMetaGen, specificMetaGen) // deleteFunc to be used as the resource watcher's delete handler. // The deleteFunc is executed when a watcher is triggered for a resource deletion(e.g. pod deleted). // It returns the identifier of the resource. 
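 // Note: pod and node metric store cleanup now happens in the shared watcher's
 // DeleteFunc handler (see addEventHandlersToWatcher), so this deleteFunc only
 // needs to compute the cache identifier.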
deleteFunc := func(r kubernetes.Resource) []string { accessor, _ := meta.Accessor(r) - - switch r := r.(type) { - case *kubernetes.Node: - nodeName := r.GetObjectMeta().GetName() - metricsRepo.DeleteNodeStore(nodeName) - } - id := accessor.GetName() namespace := accessor.GetNamespace() if namespace != "" { @@ -781,7 +853,7 @@ func NewContainerMetadataEnricher( metricsetName := base.Name() - err = createAllWatchers(client, metadataClient, metricsetName, PodResource, nodeScope, config, log, resourceWatchers) + err = createAllWatchers(client, metadataClient, metricsetName, PodResource, nodeScope, config, log, resourceWatchers, metricsRepo) if err != nil { log.Errorf("Error starting the watchers: %s", err) return &nilEnricher{} @@ -820,27 +892,8 @@ func NewContainerMetadataEnricher( mapStatuses(pod.Status.ContainerStatuses) mapStatuses(pod.Status.InitContainerStatuses) - nodeStore, _ := metricsRepo.AddNodeStore(pod.Spec.NodeName) - podId := NewPodId(pod.Namespace, pod.Name) - podStore, _ := nodeStore.AddPodStore(podId) - for _, container := range append(pod.Spec.Containers, pod.Spec.InitContainers...) { cmeta := mapstr.M{} - metrics := NewContainerMetrics() - - if cpu, ok := container.Resources.Limits["cpu"]; ok { - if q, err := resource.ParseQuantity(cpu.String()); err == nil { - metrics.CoresLimit = NewFloat64Metric(float64(q.MilliValue()) / 1000) - } - } - if memory, ok := container.Resources.Limits["memory"]; ok { - if q, err := resource.ParseQuantity(memory.String()); err == nil { - metrics.MemoryLimit = NewFloat64Metric(float64(q.Value())) - } - } - - containerStore, _ := podStore.AddContainerStore(container.Name) - containerStore.SetContainerMetrics(metrics) if s, ok := statuses[container.Name]; ok { // Extracting id and runtime ECS fields from ContainerID @@ -867,9 +920,6 @@ func NewContainerMetadataEnricher( if !ok { base.Logger().Debugf("Error while casting event: %s", ok) } - podId := NewPodId(pod.Namespace, pod.Name) - nodeStore := metricsRepo.GetNodeStore(pod.Spec.NodeName) - nodeStore.DeletePodStore(podId) for _, container := range append(pod.Spec.Containers, pod.Spec.InitContainers...) 
{ id := join(pod.ObjectMeta.GetNamespace(), pod.GetObjectMeta().GetName(), container.Name) @@ -1235,7 +1285,6 @@ func getEventMetadataFunc( logger *logp.Logger, generalMetaGen *metadata.Resource, specificMetaGen metadata.MetaGen, - metricsRepo *MetricsRepo, ) func(r kubernetes.Resource) map[string]mapstr.M { return func(r kubernetes.Resource) map[string]mapstr.M { accessor, accErr := meta.Accessor(r) @@ -1251,23 +1300,7 @@ func getEventMetadataFunc( switch r := r.(type) { case *kubernetes.Pod: return map[string]mapstr.M{id: specificMetaGen.Generate(r)} - case *kubernetes.Node: - nodeName := r.GetObjectMeta().GetName() - metrics := NewNodeMetrics() - if cpu, ok := r.Status.Capacity["cpu"]; ok { - if q, err := resource.ParseQuantity(cpu.String()); err == nil { - metrics.CoresAllocatable = NewFloat64Metric(float64(q.MilliValue()) / 1000) - } - } - if memory, ok := r.Status.Capacity["memory"]; ok { - if q, err := resource.ParseQuantity(memory.String()); err == nil { - metrics.MemoryAllocatable = NewFloat64Metric(float64(q.Value())) - } - } - nodeStore, _ := metricsRepo.AddNodeStore(nodeName) - nodeStore.SetNodeMetrics(metrics) - return map[string]mapstr.M{id: generalMetaGen.Generate(NodeResource, r)} case *kubernetes.Deployment: return map[string]mapstr.M{id: generalMetaGen.Generate(DeploymentResource, r)} diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index ec2309b08bf..3f38e7656b1 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -22,6 +22,8 @@ import ( "testing" "time" + "k8s.io/apimachinery/pkg/api/resource" + "github.com/elastic/beats/v7/metricbeat/mb" "github.com/stretchr/testify/assert" @@ -71,6 +73,7 @@ func TestWatchOptions(t *testing.T) { func TestCreateWatcher(t *testing.T) { resourceWatchers := NewWatchers() + metricsRepo := NewMetricsRepo() client := k8sfake.NewSimpleClientset() metadataClient := k8smetafake.NewSimpleMetadataClient(k8smetafake.NewTestScheme()) @@ -84,7 +87,16 @@ func TestCreateWatcher(t *testing.T) { options, err := getWatchOptions(config, false, client, log) require.NoError(t, err) - created, err := createWatcher(NamespaceResource, &kubernetes.Node{}, *options, client, metadataClient, resourceWatchers, config.Namespace, false) + created, err := createWatcher( + NamespaceResource, + &kubernetes.Node{}, + *options, + client, + metadataClient, + resourceWatchers, + metricsRepo, + config.Namespace, + false) require.True(t, created) require.NoError(t, err) @@ -94,7 +106,15 @@ func TestCreateWatcher(t *testing.T) { require.NotNil(t, resourceWatchers.metaWatchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = createWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client, metadataClient, resourceWatchers, config.Namespace, true) + created, err = createWatcher( + NamespaceResource, + &kubernetes.Namespace{}, + *options, client, + metadataClient, + resourceWatchers, + metricsRepo, + config.Namespace, + true) require.False(t, created) require.NoError(t, err) @@ -104,7 +124,15 @@ func TestCreateWatcher(t *testing.T) { require.NotNil(t, resourceWatchers.metaWatchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, metadataClient, resourceWatchers, config.Namespace, false) + created, err = createWatcher( + DeploymentResource, + &kubernetes.Deployment{}, + *options, client, + 
metadataClient, + resourceWatchers, + metricsRepo, + config.Namespace, + false) require.True(t, created) require.NoError(t, err) @@ -117,6 +145,7 @@ func TestCreateWatcher(t *testing.T) { func TestAddToMetricsetsUsing(t *testing.T) { resourceWatchers := NewWatchers() + metricsRepo := NewMetricsRepo() client := k8sfake.NewSimpleClientset() metadataClient := k8smetafake.NewSimpleMetadataClient(k8smetafake.NewTestScheme()) @@ -131,7 +160,15 @@ func TestAddToMetricsetsUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, metadataClient, resourceWatchers, config.Namespace, false) + created, err := createWatcher( + DeploymentResource, + &kubernetes.Deployment{}, + *options, client, + metadataClient, + resourceWatchers, + metricsRepo, + config.Namespace, + false) require.True(t, created) require.NoError(t, err) @@ -155,6 +192,7 @@ func TestAddToMetricsetsUsing(t *testing.T) { func TestRemoveFromMetricsetsUsing(t *testing.T) { resourceWatchers := NewWatchers() + metricsRepo := NewMetricsRepo() client := k8sfake.NewSimpleClientset() metadataClient := k8smetafake.NewSimpleMetadataClient(k8smetafake.NewTestScheme()) @@ -169,7 +207,16 @@ func TestRemoveFromMetricsetsUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, metadataClient, resourceWatchers, config.Namespace, false) + created, err := createWatcher( + DeploymentResource, + &kubernetes.Deployment{}, + *options, + client, + metadataClient, + resourceWatchers, + metricsRepo, + config.Namespace, + false) require.True(t, created) require.NoError(t, err) @@ -194,8 +241,141 @@ func TestRemoveFromMetricsetsUsing(t *testing.T) { require.Equal(t, 0, size) } +func TestWatcherContainerMetrics(t *testing.T) { + resourceWatchers := NewWatchers() + metricsRepo := NewMetricsRepo() + + containerName := "test" + cpuLimit := resource.MustParse("100m") + memoryLimit := resource.MustParse("100Mi") + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID("mockuid"), + Name: "enrich", + Labels: map[string]string{ + "label": "value", + }, + Namespace: "default", + }, + Spec: v1.PodSpec{ + NodeName: "test-node", + Containers: []v1.Container{ + { + Name: containerName, + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: cpuLimit, + v1.ResourceMemory: memoryLimit, + }, + }, + }, + }, + }, + } + podId := NewPodId(pod.Namespace, pod.Name) + resourceWatchers.lock.Lock() + + watcher := newMockWatcher() + metaWatcher := &metaWatcher{ + watcher: watcher, + started: false, + metricsetsUsing: []string{"pod"}, + enrichers: make(map[string]*enricher), + metricsRepo: metricsRepo, + } + resourceWatchers.metaWatchersMap[PodResource] = metaWatcher + addEventHandlersToWatcher(metaWatcher, resourceWatchers) + resourceWatchers.lock.Unlock() + + // add Pod and verify container metrics are present and valid + watcher.handler.OnAdd(pod) + + containerStore := metricsRepo.GetNodeStore(pod.Spec.NodeName).GetPodStore(podId).GetContainerStore(containerName) + metrics := containerStore.GetContainerMetrics() + require.NotNil(t, metrics) + assert.Equal(t, 0.1, metrics.CoresLimit.Value) + assert.Equal(t, 100*1024*1024.0, metrics.MemoryLimit.Value) + + // modify the limit and verify the new value is present + 
pod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = resource.MustParse("200m")
+	watcher.handler.OnUpdate(pod)
+	metrics = containerStore.GetContainerMetrics()
+	require.NotNil(t, metrics)
+	assert.Equal(t, 0.2, metrics.CoresLimit.Value)
+
+	// delete the pod and verify no metrics are present
+	watcher.handler.OnDelete(pod)
+	containerStore = metricsRepo.GetNodeStore(pod.Spec.NodeName).GetPodStore(podId).GetContainerStore(containerName)
+	metrics = containerStore.GetContainerMetrics()
+	require.NotNil(t, metrics)
+	assert.Nil(t, metrics.CoresLimit)
+	assert.Nil(t, metrics.MemoryLimit)
+}
+
+func TestWatcherNodeMetrics(t *testing.T) {
+	resourceWatchers := NewWatchers()
+	metricsRepo := NewMetricsRepo()
+
+	cpuLimit := resource.MustParse("100m")
+	memoryLimit := resource.MustParse("100Mi")
+	node := &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			UID:  types.UID("mockuid"),
+			Name: "enrich",
+			Labels: map[string]string{
+				"label": "value",
+			},
+			Namespace: "default",
+		},
+		Status: v1.NodeStatus{
+			Capacity: v1.ResourceList{
+				v1.ResourceCPU:    cpuLimit,
+				v1.ResourceMemory: memoryLimit,
+			},
+		},
+	}
+	resourceWatchers.lock.Lock()
+
+	watcher := newMockWatcher()
+	metaWatcher := &metaWatcher{
+		watcher:         watcher,
+		started:         false,
+		metricsetsUsing: []string{"pod"},
+		enrichers:       make(map[string]*enricher),
+		metricsRepo:     metricsRepo,
+	}
+	resourceWatchers.metaWatchersMap[NodeResource] = metaWatcher
+	addEventHandlersToWatcher(metaWatcher, resourceWatchers)
+	resourceWatchers.lock.Unlock()
+
+	// add node and verify node metrics are present and valid
+	watcher.handler.OnAdd(node)
+
+	nodeStore := metricsRepo.GetNodeStore(node.Name)
+	metrics := nodeStore.GetNodeMetrics()
+	require.NotNil(t, metrics)
+	assert.Equal(t, 0.1, metrics.CoresAllocatable.Value)
+	assert.Equal(t, 100*1024*1024.0, metrics.MemoryAllocatable.Value)
+
+	// modify the capacity and verify the new value is present
+	node.Status.Capacity[v1.ResourceCPU] = resource.MustParse("200m")
+	watcher.handler.OnUpdate(node)
+	metrics = nodeStore.GetNodeMetrics()
+	require.NotNil(t, metrics)
+	assert.Equal(t, 0.2, metrics.CoresAllocatable.Value)
+
+	// delete the node and verify no metrics are present
+	watcher.handler.OnDelete(node)
+	nodeStore = metricsRepo.GetNodeStore(node.Name)
+	metrics = nodeStore.GetNodeMetrics()
+	require.NotNil(t, metrics)
+	assert.Nil(t, metrics.CoresAllocatable)
+	assert.Nil(t, metrics.MemoryAllocatable)
+}
+
 func TestCreateAllWatchers(t *testing.T) {
 	resourceWatchers := NewWatchers()
+	metricsRepo := NewMetricsRepo()

 	client := k8sfake.NewSimpleClientset()
 	metadataClient := k8smetafake.NewSimpleMetadataClient(k8smetafake.NewTestScheme())
@@ -211,7 +391,16 @@ func TestCreateAllWatchers(t *testing.T) {
 	log := logp.NewLogger("test")

 	// Start watchers based on a resource that does not exist should cause an error
-	err := createAllWatchers(client, metadataClient, "does-not-exist", "does-not-exist", false, config, log, resourceWatchers)
+	err := createAllWatchers(
+		client,
+		metadataClient,
+		"does-not-exist",
+		"does-not-exist",
+		false,
+		config,
+		log,
+		resourceWatchers,
+		metricsRepo)
 	require.Error(t, err)
 	resourceWatchers.lock.Lock()
 	require.Equal(t, 0, len(resourceWatchers.metaWatchersMap))
@@ -220,7 +409,16 @@ func TestCreateAllWatchers(t *testing.T) {
 	// Start watcher for a resource that requires other resources, should start all the watchers
 	metricsetPod := "pod"
 	extras := getExtraWatchers(PodResource, config.AddResourceMetadata)
-	err = createAllWatchers(client, metadataClient, metricsetPod,
PodResource, false, config, log, resourceWatchers) + err = createAllWatchers( + client, + metadataClient, + metricsetPod, + PodResource, + false, + config, + log, + resourceWatchers, + metricsRepo) require.NoError(t, err) // Check that all the required watchers are in the map @@ -235,6 +433,7 @@ func TestCreateAllWatchers(t *testing.T) { func TestCreateMetaGen(t *testing.T) { resourceWatchers := NewWatchers() + metricsRepo := NewMetricsRepo() commonMetaConfig := metadata.Config{} commonConfig, err := conf.NewConfigFrom(&commonMetaConfig) @@ -259,7 +458,16 @@ func TestCreateMetaGen(t *testing.T) { // Create the watchers necessary for the metadata generator metricsetDeployment := "state_deployment" - err = createAllWatchers(client, metadataClient, metricsetDeployment, DeploymentResource, false, config, log, resourceWatchers) + err = createAllWatchers( + client, + metadataClient, + metricsetDeployment, + DeploymentResource, + false, + config, + log, + resourceWatchers, + metricsRepo) require.NoError(t, err) // Create the generators, this time without error @@ -269,6 +477,7 @@ func TestCreateMetaGen(t *testing.T) { func TestCreateMetaGenSpecific(t *testing.T) { resourceWatchers := NewWatchers() + metricsRepo := NewMetricsRepo() commonMetaConfig := metadata.Config{} commonConfig, err := conf.NewConfigFrom(&commonMetaConfig) @@ -302,7 +511,16 @@ func TestCreateMetaGenSpecific(t *testing.T) { require.Error(t, err) // Create the pod resource + the extras - err = createAllWatchers(client, metadataClient, metricsetPod, PodResource, false, config, log, resourceWatchers) + err = createAllWatchers( + client, + metadataClient, + metricsetPod, + PodResource, + false, + config, + log, + resourceWatchers, + metricsRepo) require.NoError(t, err) _, err = createMetadataGenSpecific(client, commonConfig, config.AddResourceMetadata, PodResource, resourceWatchers) @@ -315,7 +533,16 @@ func TestCreateMetaGenSpecific(t *testing.T) { // Create the service resource + the extras metricsetService := "state_service" - err = createAllWatchers(client, metadataClient, metricsetService, ServiceResource, false, config, log, resourceWatchers) + err = createAllWatchers( + client, + metadataClient, + metricsetService, + ServiceResource, + false, + config, + log, + resourceWatchers, + metricsRepo) require.NoError(t, err) _, err = createMetadataGenSpecific(client, commonConfig, config.AddResourceMetadata, ServiceResource, resourceWatchers) @@ -478,6 +705,7 @@ func TestBuildMetadataEnricher_Start_Stop_SameResources(t *testing.T) { func TestBuildMetadataEnricher_EventHandler(t *testing.T) { resourceWatchers := NewWatchers() + metricsRepo := NewMetricsRepo() resourceWatchers.lock.Lock() watcher := &metaWatcher{ @@ -485,9 +713,10 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { started: false, metricsetsUsing: []string{"pod"}, enrichers: make(map[string]*enricher), + metricsRepo: metricsRepo, } resourceWatchers.metaWatchersMap[PodResource] = watcher - addEventHandlerToWatcher(watcher, resourceWatchers) + addEventHandlersToWatcher(watcher, resourceWatchers) resourceWatchers.lock.Unlock() funcs := mockFuncs{} @@ -603,6 +832,7 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { func TestBuildMetadataEnricher_PartialMetadata(t *testing.T) { resourceWatchers := NewWatchers() + metricsRepo := NewMetricsRepo() resourceWatchers.lock.Lock() watcher := &metaWatcher{ @@ -612,9 +842,10 @@ func TestBuildMetadataEnricher_PartialMetadata(t *testing.T) { started: false, metricsetsUsing: []string{"replicaset"}, enrichers: 
make(map[string]*enricher), + metricsRepo: metricsRepo, } resourceWatchers.metaWatchersMap[ReplicaSetResource] = watcher - addEventHandlerToWatcher(watcher, resourceWatchers) + addEventHandlersToWatcher(watcher, resourceWatchers) resourceWatchers.lock.Unlock() isController := true @@ -655,7 +886,7 @@ func TestBuildMetadataEnricher_PartialMetadata(t *testing.T) { client := k8sfake.NewSimpleClientset() generalMetaGen := metadata.NewResourceMetadataGenerator(commonConfig, client) - updateFunc := getEventMetadataFunc(log, generalMetaGen, nil, nil) + updateFunc := getEventMetadataFunc(log, generalMetaGen, nil) deleteFunc := func(r kubernetes.Resource) []string { accessor, _ := meta.Accessor(r) diff --git a/packetbeat/beater/packetbeat.go b/packetbeat/beater/packetbeat.go index 6495a733379..e12573f8406 100644 --- a/packetbeat/beater/packetbeat.go +++ b/packetbeat/beater/packetbeat.go @@ -18,6 +18,7 @@ package beater import ( + "context" "flag" "fmt" "sync" @@ -111,7 +112,9 @@ func New(b *beat.Beat, rawConfig *conf.C) (beat.Beater, error) { } overwritePipelines = config.OverwritePipelines b.OverwritePipelinesCallback = func(esConfig *conf.C) error { - esClient, err := eslegclient.NewConnectedClient(esConfig, "Packetbeat") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + esClient, err := eslegclient.NewConnectedClient(ctx, esConfig, "Packetbeat") if err != nil { return err } diff --git a/packetbeat/cmd/root.go b/packetbeat/cmd/root.go index 7b1c20b34c6..eb6d14b1692 100644 --- a/packetbeat/cmd/root.go +++ b/packetbeat/cmd/root.go @@ -22,6 +22,7 @@ import ( "github.com/spf13/pflag" + "github.com/elastic/beats/v7/libbeat/cfgfile" cmd "github.com/elastic/beats/v7/libbeat/cmd" "github.com/elastic/beats/v7/libbeat/cmd/instance" "github.com/elastic/beats/v7/libbeat/ecs" @@ -51,10 +52,15 @@ var RootCmd *cmd.BeatsRootCmd func PacketbeatSettings(globals processors.PluginConfig) instance.Settings { runFlags := pflag.NewFlagSet(Name, pflag.ExitOnError) runFlags.AddGoFlag(flag.CommandLine.Lookup("I")) + cfgfile.AddAllowedBackwardsCompatibleFlag("I") runFlags.AddGoFlag(flag.CommandLine.Lookup("t")) + cfgfile.AddAllowedBackwardsCompatibleFlag("t") runFlags.AddGoFlag(flag.CommandLine.Lookup("O")) + cfgfile.AddAllowedBackwardsCompatibleFlag("O") runFlags.AddGoFlag(flag.CommandLine.Lookup("l")) + cfgfile.AddAllowedBackwardsCompatibleFlag("l") runFlags.AddGoFlag(flag.CommandLine.Lookup("dump")) + cfgfile.AddAllowedBackwardsCompatibleFlag("dump") return instance.Settings{ RunFlags: runFlags, diff --git a/packetbeat/main_test.go b/packetbeat/main_test.go index 359e70f161b..2dfcfb4572c 100644 --- a/packetbeat/main_test.go +++ b/packetbeat/main_test.go @@ -23,6 +23,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" "github.com/elastic/beats/v7/packetbeat/cmd" ) @@ -33,11 +34,14 @@ func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
func TestSystem(*testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/winlogbeat/beater/winlogbeat.go b/winlogbeat/beater/winlogbeat.go index e41aa54cb7f..4e6b2b3657d 100644 --- a/winlogbeat/beater/winlogbeat.go +++ b/winlogbeat/beater/winlogbeat.go @@ -108,7 +108,9 @@ func (eb *Winlogbeat) init(b *beat.Beat) error { } b.OverwritePipelinesCallback = func(esConfig *conf.C) error { overwritePipelines := config.OverwritePipelines - esClient, err := eslegclient.NewConnectedClient(esConfig, "Winlogbeat") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + esClient, err := eslegclient.NewConnectedClient(ctx, esConfig, "Winlogbeat") if err != nil { return err } diff --git a/winlogbeat/eventlog/wineventlog.go b/winlogbeat/eventlog/wineventlog.go index e418f22bf06..43654284218 100644 --- a/winlogbeat/eventlog/wineventlog.go +++ b/winlogbeat/eventlog/wineventlog.go @@ -62,8 +62,7 @@ var ( const ( // renderBufferSize is the size in bytes of the buffer used to render events. - renderBufferSize = 1 << 14 - + renderBufferSize = 1 << 19 // 512KB, 256K wide characters // winEventLogApiName is the name used to identify the Windows Event Log API // as both an event type and an API. winEventLogAPIName = "wineventlog" @@ -448,14 +447,6 @@ func (l *winEventLog) Read() ([]Record, error) { for _, h := range handles { l.outputBuf.Reset() err := l.render(h, l.outputBuf) - var bufErr sys.InsufficientBufferError - if errors.As(err, &bufErr) { - detailf("%s Increasing render buffer size to %d", l.logPrefix, - bufErr.RequiredSize) - l.renderBuf = make([]byte, bufErr.RequiredSize) - l.outputBuf.Reset() - err = l.render(h, l.outputBuf) - } l.metrics.logError(err) if err != nil && l.outputBuf.Len() == 0 { logp.Err("%s Dropping event with rendering error. %v", l.logPrefix, err) diff --git a/winlogbeat/main_test.go b/winlogbeat/main_test.go index 0bbc2e4e526..002c7431f99 100644 --- a/winlogbeat/main_test.go +++ b/winlogbeat/main_test.go @@ -22,6 +22,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" "github.com/elastic/beats/v7/winlogbeat/cmd" ) @@ -32,12 +33,15 @@ func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // TestSystem is the function called when the test binary is started. // Only calls main. func TestSystem(*testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/winlogbeat/sys/strings.go b/winlogbeat/sys/strings.go index d542277f17b..34325c32f9d 100644 --- a/winlogbeat/sys/strings.go +++ b/winlogbeat/sys/strings.go @@ -44,3 +44,27 @@ func RemoveWindowsLineEndings(s string) string { s = strings.Replace(s, "\r\n", "\n", -1) return strings.TrimRight(s, "\n") } + +// BinaryToString converts a binary field which is encoded in hexadecimal +// to its string representation. This is equivalent to hex.EncodeToString +// but its output is in uppercase to match the Windows +// XML formatting of these fields. 
+func BinaryToString(bin []byte) string { + if len(bin) == 0 { + return "" + } + + const hexTable = "0123456789ABCDEF" + + size := len(bin) * 2 + buffer := make([]byte, size) + + j := 0 + for _, v := range bin { + buffer[j] = hexTable[v>>4] + buffer[j+1] = hexTable[v&0x0f] + j += 2 + } + + return string(buffer) +} diff --git a/winlogbeat/sys/strings_test.go b/winlogbeat/sys/strings_test.go index 0771b7b3cff..53f2d8ae632 100644 --- a/winlogbeat/sys/strings_test.go +++ b/winlogbeat/sys/strings_test.go @@ -36,6 +36,12 @@ func TestUTF16BytesToString(t *testing.T) { assert.Equal(t, input, output) } +func TestMakeDisplayableBinaryString(t *testing.T) { + input := []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF} + output := BinaryToString(input) + assert.Equal(t, "0123456789ABCDEF", output) +} + func BenchmarkUTF16BytesToString(b *testing.B) { utf16Bytes := common.StringToUTF16Bytes("A logon was attempted using explicit credentials.") diff --git a/winlogbeat/sys/strings_windows.go b/winlogbeat/sys/strings_windows.go new file mode 100644 index 00000000000..0ce8b09f5d6 --- /dev/null +++ b/winlogbeat/sys/strings_windows.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package sys + +import ( + "sync" + + "golang.org/x/sys/windows" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" +) + +var getCachedANSIDecoder = sync.OnceValue(initANSIDecoder) + +func initANSIDecoder() *encoding.Decoder { + ansiCP := windows.GetACP() + for _, enc := range charmap.All { + cm, ok := enc.(*charmap.Charmap) + if !ok { + continue + } + cmID, _ := cm.ID() + if uint32(cmID) != ansiCP { + continue + } + return cm.NewDecoder() + } + // This should never be reached. + // If the ANSI Code Page is not found, we will default to + // Windows1252 Code Page, which is the default for ANSI in + // many regions and corresponds to Western European languages. 
+ return charmap.Windows1252.NewDecoder() +} + +func ANSIBytesToString(enc []byte) (string, error) { + out, err := getCachedANSIDecoder().Bytes(enc) + return string(out), err +} diff --git a/winlogbeat/sys/wineventlog/format_message.go b/winlogbeat/sys/wineventlog/format_message.go index 9c1cf8254ac..4bc03166939 100644 --- a/winlogbeat/sys/wineventlog/format_message.go +++ b/winlogbeat/sys/wineventlog/format_message.go @@ -75,39 +75,43 @@ func evtFormatMessage(metadataHandle EvtHandle, eventHandle EvtHandle, messageID valuesPtr = &values[0] } - // best guess render buffer size, 16KB, to avoid rendering message twice in most cases - const bestGuessRenderBufferSize = 1 << 14 + // best guess render buffer size, to avoid rendering message twice in most cases + const bestGuessRenderBufferSize = 1 << 19 // 512KB, 256K wide characters // EvtFormatMessage operates with WCHAR buffer, assuming the size of the buffer in characters. // https://docs.microsoft.com/en-us/windows/win32/api/winevt/nf-winevt-evtformatmessage - var bufferNeeded uint32 - bufferSize := uint32(bestGuessRenderBufferSize / 2) + var wcharBufferUsed uint32 + wcharBufferSize := uint32(bestGuessRenderBufferSize / 2) // Get a buffer from the pool and adjust its length. bb := sys.NewPooledByteBuffer() defer bb.Free() - bb.Reserve(int(bufferSize * 2)) + bb.Reserve(int(wcharBufferSize * 2)) - err := _EvtFormatMessage(metadataHandle, eventHandle, messageID, valuesCount, valuesPtr, messageFlag, bufferSize, bb.PtrAt(0), &bufferNeeded) + err := _EvtFormatMessage(metadataHandle, eventHandle, messageID, valuesCount, valuesPtr, messageFlag, wcharBufferSize, bb.PtrAt(0), &wcharBufferUsed) switch err { //nolint:errorlint // This is an errno or nil. - case nil: // OK - return sys.UTF16BytesToString(bb.Bytes()) - // Ignore some errors so it can tolerate missing or mismatched parameter values. - case windows.ERROR_EVT_UNRESOLVED_VALUE_INSERT, + case nil, // OK + windows.ERROR_EVT_UNRESOLVED_VALUE_INSERT, windows.ERROR_EVT_UNRESOLVED_PARAMETER_INSERT, windows.ERROR_EVT_MAX_INSERTS_REACHED: - return sys.UTF16BytesToString(bb.Bytes()) + // wcharBufferUsed indicates the size used internally to render the message. When called with nil buffer + // EvtFormatMessage returns ERROR_INSUFFICIENT_BUFFER, but otherwise succeeds copying only up to + // wcharBufferSize to our buffer, truncating the message if our buffer was too small. + if wcharBufferUsed <= wcharBufferSize { + return sys.UTF16BytesToString(bb.Bytes()) + } + fallthrough case windows.ERROR_INSUFFICIENT_BUFFER: - bb.Reserve(int(bufferNeeded * 2)) - bufferSize = bufferNeeded + bb.Reserve(int(wcharBufferUsed * 2)) + wcharBufferSize = wcharBufferUsed default: return "", fmt.Errorf("failed in EvtFormatMessage: %w", err) } - err = _EvtFormatMessage(metadataHandle, eventHandle, messageID, valuesCount, valuesPtr, messageFlag, bufferSize, bb.PtrAt(0), &bufferNeeded) + err = _EvtFormatMessage(metadataHandle, eventHandle, messageID, valuesCount, valuesPtr, messageFlag, wcharBufferSize, bb.PtrAt(0), &wcharBufferUsed) switch err { //nolint:errorlint // This is an errno or nil. 
case nil: // OK diff --git a/winlogbeat/sys/wineventlog/syscall_windows.go b/winlogbeat/sys/wineventlog/syscall_windows.go index 6e03a1969cf..2dde1329e6b 100644 --- a/winlogbeat/sys/wineventlog/syscall_windows.go +++ b/winlogbeat/sys/wineventlog/syscall_windows.go @@ -442,11 +442,16 @@ func (v EvtVariant) Data(buf []byte) (interface{}, error) { switch typ { case EvtVarTypeNull: return nil, nil - case EvtVarTypeString: + case EvtVarTypeString, EvtVarTypeEvtXml: addr := unsafe.Pointer(&buf[0]) offset := v.ValueAsUintPtr() - uintptr(addr) s, err := sys.UTF16BytesToString(buf[offset:]) return s, err + case EvtVarTypeAnsiString: + addr := unsafe.Pointer(&buf[0]) + offset := v.ValueAsUintPtr() - uintptr(addr) + s, err := sys.ANSIBytesToString(buf[offset:]) + return s, err case EvtVarTypeSByte: return int8(v.ValueAsUint8()), nil case EvtVarTypeByte: @@ -476,15 +481,28 @@ func (v EvtVariant) Data(buf []byte) (interface{}, error) { return false, nil } return true, nil + case EvtVarTypeBinary: + addr := unsafe.Pointer(&buf[0]) + offset := v.ValueAsUintPtr() - uintptr(addr) + return sys.BinaryToString(buf[offset:]), nil case EvtVarTypeGuid: addr := unsafe.Pointer(&buf[0]) offset := v.ValueAsUintPtr() - uintptr(addr) guid := (*windows.GUID)(unsafe.Pointer(&buf[offset])) copy := *guid return copy, nil + case EvtVarTypeSizeT: + return v.ValueAsUintPtr(), nil case EvtVarTypeFileTime: ft := (*windows.Filetime)(unsafe.Pointer(&v.Value)) return time.Unix(0, ft.Nanoseconds()).UTC(), nil + case EvtVarTypeSysTime: + st := (*windows.Systemtime)(unsafe.Pointer(&v.Value)) + var ft windows.Filetime + if err := sys.SystemTimeToFileTime(st, &ft); err != nil { + return nil, err + } + return time.Unix(0, ft.Nanoseconds()).UTC(), nil case EvtVarTypeSid: addr := unsafe.Pointer(&buf[0]) offset := v.ValueAsUintPtr() - uintptr(addr) diff --git a/winlogbeat/sys/wineventlog/wineventlog_windows.go b/winlogbeat/sys/wineventlog/wineventlog_windows.go index 22495f6bda2..66ab869fb24 100644 --- a/winlogbeat/sys/wineventlog/wineventlog_windows.go +++ b/winlogbeat/sys/wineventlog/wineventlog_windows.go @@ -403,35 +403,35 @@ func FormatEventString( } var bufferPtr *byte - if renderBuf != nil { + if len(renderBuf) > 0 { bufferPtr = &renderBuf[0] } // EvtFormatMessage operates with WCHAR buffer, assuming the size of the buffer in characters. // https://docs.microsoft.com/en-us/windows/win32/api/winevt/nf-winevt-evtformatmessage - var bufferNeeded uint32 - bufferSize := uint32(len(renderBuf) / 2) + var wcharBufferUsed uint32 + wcharBufferSize := uint32(len(renderBuf) / 2) - err := _EvtFormatMessage(ph, eventHandle, 0, 0, nil, messageFlag, bufferSize, bufferPtr, &bufferNeeded) - if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // This is an errno. + err := _EvtFormatMessage(ph, eventHandle, 0, 0, nil, messageFlag, wcharBufferSize, bufferPtr, &wcharBufferUsed) + if err != nil && !errors.Is(err, windows.ERROR_INSUFFICIENT_BUFFER) { return fmt.Errorf("failed in EvtFormatMessage: %w", err) } else if err == nil { - // Windows API returns a null terminated WCHAR C-style string in the buffer. bufferNeeded applies - // only when ERROR_INSUFFICIENT_BUFFER is returned. Luckily the UTF16ToUTF8Bytes/UTF16ToString - // functions stop at null termination. Note, as signaled in a comment at the end of this function, - // this behavior is bad for EvtFormatMessageKeyword as then the API returns a list of null terminated - // strings in the buffer (it's fine for now as we don't use this parameter value). 
- return common.UTF16ToUTF8Bytes(renderBuf, out) + // wcharBufferUsed indicates the size used internally to render the message. When called with nil buffer + // EvtFormatMessage returns ERROR_INSUFFICIENT_BUFFER, but otherwise succeeds copying only up to + // wcharBufferSize to our buffer, truncating the message if our buffer was too small. + if wcharBufferUsed <= wcharBufferSize { + return common.UTF16ToUTF8Bytes(renderBuf[:wcharBufferUsed*2], out) + } } // Get a buffer from the pool and adjust its length. bb := sys.NewPooledByteBuffer() defer bb.Free() - bb.Reserve(int(bufferNeeded * 2)) - bufferSize = bufferNeeded + bb.Reserve(int(wcharBufferUsed * 2)) + wcharBufferSize = wcharBufferUsed - err = _EvtFormatMessage(ph, eventHandle, 0, 0, nil, messageFlag, bufferSize, bb.PtrAt(0), &bufferNeeded) + err = _EvtFormatMessage(ph, eventHandle, 0, 0, nil, messageFlag, wcharBufferSize, bb.PtrAt(0), &wcharBufferUsed) if err != nil { return fmt.Errorf("failed in EvtFormatMessage: %w", err) } @@ -550,20 +550,36 @@ func evtRenderProviderName(renderBuf []byte, eventHandle EvtHandle) (string, err } func renderXML(eventHandle EvtHandle, flag EvtRenderFlag, renderBuf []byte, out io.Writer) error { - var bufferUsed, propertyCount uint32 - err := _EvtRender(0, eventHandle, flag, uint32(len(renderBuf)), - &renderBuf[0], &bufferUsed, &propertyCount) - if err == ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // This is an errno or nil. - return sys.InsufficientBufferError{Cause: err, RequiredSize: int(bufferUsed)} + var bufferUsed, bufferSize, propertyCount uint32 + var bufferPtr *byte + + bufferSize = uint32(len(renderBuf)) + if bufferSize > 0 { + bufferPtr = &renderBuf[0] } - if err != nil { + err := _EvtRender(0, eventHandle, flag, bufferSize, bufferPtr, &bufferUsed, &propertyCount) + if err != nil && !errors.Is(err, windows.ERROR_INSUFFICIENT_BUFFER) { return err + } else if err == nil { + // bufferUsed indicates the size used internally to render the message. When called with nil buffer + // EvtRender returns ERROR_INSUFFICIENT_BUFFER, but otherwise succeeds copying only up to + // bufferSize to our buffer, truncating the message if our buffer was too small. + if bufferUsed <= bufferSize { + return common.UTF16ToUTF8Bytes(renderBuf[:bufferUsed], out) + } } - if int(bufferUsed) > len(renderBuf) { - return fmt.Errorf("Windows EvtRender reported that wrote %d bytes "+ - "to the buffer, but the buffer can only hold %d bytes", - bufferUsed, len(renderBuf)) + // Get a buffer from the pool and adjust its length. + bb := sys.NewPooledByteBuffer() + defer bb.Free() + + bb.Reserve(int(bufferUsed)) + bufferSize = bufferUsed + + err = _EvtRender(0, eventHandle, flag, bufferSize, bb.PtrAt(0), &bufferUsed, &propertyCount) + if err != nil { + return fmt.Errorf("failed in EvtRender: %w", err) } - return common.UTF16ToUTF8Bytes(renderBuf[:bufferUsed], out) + + return common.UTF16ToUTF8Bytes(bb.Bytes(), out) } diff --git a/winlogbeat/sys/zsyscall_windows.go b/winlogbeat/sys/zsyscall_windows.go new file mode 100644 index 00000000000..726140ed126 --- /dev/null +++ b/winlogbeat/sys/zsyscall_windows.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package sys + +import ( + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel = windows.NewLazySystemDLL("Kernel32.dll") + + procSystemTimeToFileTime = modkernel.NewProc("SystemTimeToFileTime") +) + +func SystemTimeToFileTime(systemTime *windows.Systemtime, fileTime *windows.Filetime) error { + r1, _, err := syscall.SyscallN(procSystemTimeToFileTime.Addr(), uintptr(unsafe.Pointer(systemTime)), uintptr(unsafe.Pointer(fileTime))) + if r1 == 0 { + return fmt.Errorf("error converting system time to file time: %w", err) + } + return nil +} diff --git a/winlogbeat/sys/zsyscall_windows_test.go b/winlogbeat/sys/zsyscall_windows_test.go new file mode 100644 index 00000000000..b7bcbe0009a --- /dev/null +++ b/winlogbeat/sys/zsyscall_windows_test.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package sys + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "golang.org/x/sys/windows" +) + +func TestSystemTimeToFileTime(t *testing.T) { + ts := time.Date( + 2024, time.Month(9), 3, + 0, 0, 0, 0, time.UTC).UnixNano() + st := windows.Systemtime{ + Year: 2024, + Month: 9, + Day: 3, + } + var ft windows.Filetime + if err := SystemTimeToFileTime(&st, &ft); err != nil { + t.Fatal(err) + } + assert.Equal(t, ts, ft.Nanoseconds()) +} diff --git a/x-pack/agentbeat/main_test.go b/x-pack/agentbeat/main_test.go index 4201d651666..0333a1ebcdf 100644 --- a/x-pack/agentbeat/main_test.go +++ b/x-pack/agentbeat/main_test.go @@ -10,6 +10,8 @@ import ( "testing" "github.com/spf13/cobra" + + "github.com/elastic/beats/v7/libbeat/cfgfile" ) var ( @@ -22,11 +24,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") abCommand = AgentBeat() abCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") abCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { if err := abCommand.Execute(); err != nil { os.Exit(1) diff --git a/x-pack/auditbeat/main_test.go b/x-pack/auditbeat/main_test.go index 5d237645271..3456231f516 100644 --- a/x-pack/auditbeat/main_test.go +++ b/x-pack/auditbeat/main_test.go @@ -10,6 +10,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" "github.com/elastic/beats/v7/x-pack/auditbeat/cmd" ) @@ -21,11 +22,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. func TestSystem(*testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go b/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go index a4646b6b668..93fed7096b3 100644 --- a/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go +++ b/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go @@ -26,8 +26,10 @@ import ( ) const ( - processorName = "add_session_metadata" - logName = "processor." + processorName + processorName = "add_session_metadata" + logName = "processor." + processorName + procfsType = "procfs" + kernelTracingType = "kernel_tracing" ) // InitializeModule initializes this module. @@ -36,13 +38,14 @@ func InitializeModule() { } type addSessionMetadata struct { - ctx context.Context - cancel context.CancelFunc - config config - logger *logp.Logger - db *processdb.DB - provider provider.Provider - backend string + ctx context.Context + cancel context.CancelFunc + config config + logger *logp.Logger + db *processdb.DB + provider provider.Provider + backend string + providerType string } func New(cfg *cfg.C) (beat.Processor, error) { @@ -61,51 +64,56 @@ func New(cfg *cfg.C) (beat.Processor, error) { return nil, fmt.Errorf("failed to create DB: %w", err) } - if c.Backend != "kernel_tracing" { - backfilledPIDs := db.ScrapeProcfs() - logger.Infof("backfilled %d processes", len(backfilledPIDs)) - } - var p provider.Provider + var pType string switch c.Backend { case "auto": p, err = kerneltracingprovider.NewProvider(ctx, logger) if err != nil { // Most likely cause of error is not supporting ebpf or kprobes on system, try procfs + backfilledPIDs := db.ScrapeProcfs() + logger.Infof("backfilled %d processes", len(backfilledPIDs)) p, err = procfsprovider.NewProvider(ctx, logger, db, reader, c.PIDField) if err != nil { cancel() return nil, fmt.Errorf("failed to create provider: %w", err) } logger.Info("backend=auto using procfs") + pType = procfsType } else { logger.Info("backend=auto using kernel_tracing") + pType = kernelTracingType } case "procfs": + backfilledPIDs := db.ScrapeProcfs() + logger.Infof("backfilled %d processes", len(backfilledPIDs)) p, err = procfsprovider.NewProvider(ctx, logger, db, reader, c.PIDField) if err != nil { cancel() return nil, fmt.Errorf("failed to create procfs provider: %w", err) } + pType = procfsType case "kernel_tracing": p, err = kerneltracingprovider.NewProvider(ctx, logger) if err != nil { cancel() return nil, 
fmt.Errorf("failed to create kernel_tracing provider: %w", err) } + pType = kernelTracingType default: cancel() return nil, fmt.Errorf("unknown backend configuration") } return &addSessionMetadata{ - ctx: ctx, - cancel: cancel, - config: c, - logger: logger, - db: db, - provider: p, - backend: c.Backend, + ctx: ctx, + cancel: cancel, + config: c, + logger: logger, + db: db, + provider: p, + backend: c.Backend, + providerType: pType, }, nil } @@ -161,12 +169,12 @@ func (p *addSessionMetadata) enrich(ev *beat.Event) (*beat.Event, error) { } var fullProcess types.Process - if p.backend == "kernel_tracing" { + if p.providerType == kernelTracingType { // kernel_tracing doesn't enrich with the processor DB; process info is taken directly from quark cache proc, err := p.provider.GetProcess(pid) if err != nil { e := fmt.Errorf("pid %v not found in db: %w", pid, err) - p.logger.Warnw("PID not found in provider", "pid", pid, "error", err) + p.logger.Debugw("PID not found in provider", "pid", pid, "error", err) return nil, e } fullProcess = *proc @@ -174,7 +182,7 @@ func (p *addSessionMetadata) enrich(ev *beat.Event) (*beat.Event, error) { fullProcess, err = p.db.GetProcess(pid) if err != nil { e := fmt.Errorf("pid %v not found in db: %w", pid, err) - p.logger.Warnw("PID not found in provider", "pid", pid, "error", err) + p.logger.Debugw("PID not found in provider", "pid", pid, "error", err) return nil, e } } diff --git a/x-pack/auditbeat/processors/sessionmd/processdb/db.go b/x-pack/auditbeat/processors/sessionmd/processdb/db.go index e18c247a859..1f97f7d0fd5 100644 --- a/x-pack/auditbeat/processors/sessionmd/processdb/db.go +++ b/x-pack/auditbeat/processors/sessionmd/processdb/db.go @@ -421,7 +421,7 @@ func (db *DB) InsertExit(exit types.ProcessExitEvent) { pid := exit.PIDs.Tgid process, ok := db.processes[pid] if !ok { - db.logger.Errorf("could not insert exit, pid %v not found in db", pid) + db.logger.Debugf("could not insert exit, pid %v not found in db", pid) return } process.ExitCode = exit.ExitCode diff --git a/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go b/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go index b76dfdfdb48..992e2485836 100644 --- a/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go +++ b/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go @@ -196,7 +196,7 @@ func (r ProcfsReader) GetAllProcesses() ([]ProcessInfo, error) { for _, proc := range procs { process_info, err := r.getProcessInfo(proc) if err != nil { - r.logger.Warnf("failed to read process info for %v", proc.PID) + r.logger.Debugf("failed to read process info for %v", proc.PID) } ret = append(ret, process_info) } diff --git a/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go index 506d840b5ef..d3ec4ba7bd3 100644 --- a/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go +++ b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go @@ -211,7 +211,7 @@ func (p *prvdr) Sync(_ *beat.Event, pid uint32) error { func (p *prvdr) handleBackoff(now time.Time) { if p.inBackoff { if now.Sub(p.backoffStart) > backoffDuration { - p.logger.Warnw("ended backoff, skipped processes", "backoffSkipped", p.backoffSkipped) + p.logger.Infow("ended backoff, skipped processes", "backoffSkipped", p.backoffSkipped) p.inBackoff = false p.combinedWait = 0 * time.Millisecond } else { 
@@ -220,7 +220,7 @@ func (p *prvdr) handleBackoff(now time.Time) { } } else { if p.combinedWait > combinedWaitLimit { - p.logger.Warn("starting backoff") + p.logger.Info("starting backoff") p.inBackoff = true p.backoffStart = now p.backoffSkipped = 0 diff --git a/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go index e29e70a0549..34c3166f26f 100644 --- a/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go +++ b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go @@ -68,7 +68,7 @@ func (p prvdr) Sync(ev *beat.Event, pid uint32) error { pe.Env = procInfo.Env pe.Filename = procInfo.Filename } else { - p.logger.Warnw("couldn't get process info from proc for pid", "pid", pid, "error", err) + p.logger.Debugw("couldn't get process info from proc for pid", "pid", pid, "error", err) // If process info couldn't be taken from procfs, populate with as much info as // possible from the event pe.PIDs.Tgid = pid diff --git a/x-pack/filebeat/docs/inputs/input-entity-analytics.asciidoc b/x-pack/filebeat/docs/inputs/input-entity-analytics.asciidoc index 5ca419acd3e..b4b701d3919 100644 --- a/x-pack/filebeat/docs/inputs/input-entity-analytics.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-entity-analytics.asciidoc @@ -959,6 +959,7 @@ Example configuration: id: okta-1 provider: okta dataset: "all" + enrich_with: ["groups", "roles"] sync_interval: "12h" update_interval: "30m" okta_domain: "OKTA_DOMAIN" @@ -992,6 +993,13 @@ or may be left empty for the default behavior which is to collect all entities. When the `dataset` is set to "devices", some user entity data is collected in order to populate the registered users and registered owner fields for each device. +[float] +===== `enrich_with` + +The metadata to enrich users with. This is an array of values that may contain +"groups", "roles" and "factors", or "none". If the array only contains "none", no +metadata is collected for users. The default behavior is to collect "groups". + [float] ===== `sync_interval` diff --git a/x-pack/filebeat/docs/inputs/input-etw.asciidoc b/x-pack/filebeat/docs/inputs/input-etw.asciidoc index c072542cf5a..dcfd4732c26 100644 --- a/x-pack/filebeat/docs/inputs/input-etw.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-etw.asciidoc @@ -9,8 +9,6 @@ ETW ++++ -beta[] - https://learn.microsoft.com/en-us/windows/win32/etw/event-tracing-portal[Event Tracing for Windows] is a powerful logging and tracing mechanism built into the Windows operating system. It provides a detailed view of application and system diff --git a/x-pack/filebeat/docs/inputs/input-http-endpoint.asciidoc b/x-pack/filebeat/docs/inputs/input-http-endpoint.asciidoc index 0ca54ab4567..afd39fec0f1 100644 --- a/x-pack/filebeat/docs/inputs/input-http-endpoint.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-http-endpoint.asciidoc @@ -332,7 +332,7 @@ For example, `["content-type"]` will become `["Content-Type"]` when the filebeat [float] ==== `preserve_original_event` -This option copies the raw unmodified body of the incoming request to the event.original field as a string before sending the event to Elasticsearch. +This option includes the JSON representation of the incoming request in the `event.original` field as a string before sending the event to Elasticsearch. 
The representation may not be a verbatim copy of the original message, but is guaranteed to be an https://datatracker.ietf.org/doc/html/rfc7493[RFC 7493]-compliant message. [float] ==== `crc.provider` diff --git a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc b/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc index 21766a515a8..a7dd5d7634f 100644 --- a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc @@ -783,6 +783,8 @@ Valid when used with `type: map`. When not empty, defines a new field where the If set to true, empty or missing value will be ignored and processing will pass on to the next nested split operation instead of failing with an error. Default: `false`. +Note that if `ignore_empty_value` is `true` and the final result is empty, no event will be published, and no cursor update will be made. If a cursor update must be made for all responses, this should be set to `false` and the ingest pipeline must be configured to tolerate empty event sets. + [float] ==== `response.split[].split` @@ -1556,6 +1558,8 @@ See <> . Cursor is a list of key value objects where arbitrary values are defined. The values are interpreted as <> and a default template can be set. Cursor state is kept between input restarts and updated once all the events for a request are published. +If no event is published, no cursor update is made. This can have implications on how cursor updates should be performed when the target API returns empty response sets. + Each cursor entry is formed by: - A `value` template, which will define the value to store when evaluated. diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index c00099c3667..5e636901565 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -21,18 +21,7 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Force using journald to collect system logs - #var.use_journald: true|false - - # Force using log files to collect system logs - #var.use_files: true|false - - # If use_journald and use_files are false, then - # Filebeat will autodetect whether use to journald - # to collect system logs. - - # Input configuration (advanced). - # Any input configuration option + # Input configuration (advanced). Any input configuration option # can be added under this section. #input: @@ -44,23 +33,6 @@ filebeat.modules: # Filebeat will choose the paths depending on your OS. #var.paths: - # Force using journald to collect system logs - #var.use_journald: true|false - - # Force using log files to collect system logs - #var.use_files: true|false - - # If use_journald and use_files are false, then - # Filebeat will autodetect whether use to journald - # to collect system logs. - - # A list of tags to include in events. Including 'forwarded' - # indicates that the events did not originate on this host and - # causes host.name to not be added to events. Include - # 'preserve_orginal_event' causes the pipeline to retain the raw log - # in event.original. Defaults to []. - #var.tags: [] - # Input configuration (advanced). Any input configuration option # can be added under this section. 
#input: diff --git a/x-pack/filebeat/input/awss3/config.go b/x-pack/filebeat/input/awss3/config.go index d80108590ce..6f485431ddf 100644 --- a/x-pack/filebeat/input/awss3/config.go +++ b/x-pack/filebeat/input/awss3/config.go @@ -7,6 +7,7 @@ package awss3 import ( "errors" "fmt" + "net/url" "time" awssdk "github.com/aws/aws-sdk-go-v2/aws" @@ -106,6 +107,13 @@ func (c *config) Validate() error { if c.ProviderOverride != "" && c.NonAWSBucketName == "" { return errors.New("provider can only be overridden when polling non-AWS S3 services") } + if c.AWSConfig.Endpoint != "" { + // Make sure the given endpoint can be parsed + _, err := url.Parse(c.AWSConfig.Endpoint) + if err != nil { + return fmt.Errorf("failed to parse endpoint: %w", err) + } + } if c.BackupConfig.NonAWSBackupToBucketName != "" && c.NonAWSBucketName == "" { return errors.New("backup to non-AWS bucket can only be used for non-AWS sources") } @@ -245,14 +253,18 @@ func (c config) getBucketARN() string { // options struct. // Should be provided as a parameter to s3.NewFromConfig. func (c config) s3ConfigModifier(o *s3.Options) { - if c.NonAWSBucketName != "" { - //nolint:staticcheck // haven't migrated to the new interface yet - o.EndpointResolver = nonAWSBucketResolver{endpoint: c.AWSConfig.Endpoint} - } - if c.AWSConfig.FIPSEnabled { o.EndpointOptions.UseFIPSEndpoint = awssdk.FIPSEndpointStateEnabled } + // Apply slightly different endpoint resolvers depending on whether we're in S3 or SQS mode. + if c.AWSConfig.Endpoint != "" { + //nolint:staticcheck // haven't migrated to the new interface yet + o.EndpointResolver = s3.EndpointResolverFromURL(c.AWSConfig.Endpoint, + func(e *awssdk.Endpoint) { + // The S3 hostname is immutable in bucket polling mode, mutable otherwise. + e.HostnameImmutable = (c.getBucketARN() != "") + }) + } o.UsePathStyle = c.PathStyle o.Retryer = retry.NewStandard(func(so *retry.StandardOptions) { @@ -269,6 +281,9 @@ func (c config) sqsConfigModifier(o *sqs.Options) { if c.AWSConfig.FIPSEnabled { o.EndpointOptions.UseFIPSEndpoint = awssdk.FIPSEndpointStateEnabled } + if c.AWSConfig.Endpoint != "" { + o.EndpointResolver = sqs.EndpointResolverFromURL(c.AWSConfig.Endpoint) + } } func (c config) getFileSelectors() []fileSelectorConfig { diff --git a/x-pack/filebeat/input/awss3/input.go b/x-pack/filebeat/input/awss3/input.go index f0fa3137974..6d62f454c42 100644 --- a/x-pack/filebeat/input/awss3/input.go +++ b/x-pack/filebeat/input/awss3/input.go @@ -7,8 +7,6 @@ package awss3 import ( "fmt" - awssdk "github.com/aws/aws-sdk-go-v2/aws" - "github.com/elastic/beats/v7/filebeat/beater" v2 "github.com/elastic/beats/v7/filebeat/input/v2" "github.com/elastic/beats/v7/libbeat/feature" @@ -48,15 +46,10 @@ func (im *s3InputManager) Create(cfg *conf.C) (v2.Input, error) { return nil, fmt.Errorf("initializing AWS config: %w", err) } - if config.AWSConfig.Endpoint != "" { - // Add a custom endpointResolver to the awsConfig so that all the requests are routed to this endpoint - awsConfig.EndpointResolverWithOptions = awssdk.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (awssdk.Endpoint, error) { - return awssdk.Endpoint{ - PartitionID: "aws", - URL: config.AWSConfig.Endpoint, - SigningRegion: awsConfig.Region, - }, nil - }) + // The awsConfig now contains the region from the credential profile or the default region. + // If the region is explicitly set in the config, it wins. + if config.RegionName != "" { + awsConfig.Region = config.RegionName } if config.QueueURL != "" { diff --git 
a/x-pack/filebeat/input/awss3/input_integration_test.go b/x-pack/filebeat/input/awss3/input_integration_test.go index 9303c5c7259..cf47f7b9230 100644 --- a/x-pack/filebeat/input/awss3/input_integration_test.go +++ b/x-pack/filebeat/input/awss3/input_integration_test.go @@ -269,6 +269,174 @@ func TestInputRunSQSOnLocalstack(t *testing.T) { assert.EqualValues(t, 0.0, s3Input.metrics.sqsWorkerUtilization.Get()) // Workers are reset after processing and hence utilization should be 0 at the end } +func TestInputRunSQSWithConfig(t *testing.T) { + tests := []struct { + name string + queue_url string + endpoint string + region string + default_region string + want string + wantErr error + }{ + { + name: "no region", + queue_url: "https://sqs.us-east-1.amazonaws.com/627959692251/test-s3-logs", + want: "us-east-1", + }, + { + name: "no region but with long endpoint", + queue_url: "https://sqs.us-east-1.abc.xyz/627959692251/test-s3-logs", + endpoint: "https://s3.us-east-1.abc.xyz", + want: "us-east-1", + }, + { + name: "no region but with short endpoint", + queue_url: "https://sqs.us-east-1.abc.xyz/627959692251/test-s3-logs", + endpoint: "https://abc.xyz", + want: "us-east-1", + }, + { + name: "no region custom queue domain", + queue_url: "https://sqs.us-east-1.xyz.abc/627959692251/test-s3-logs", + wantErr: errBadQueueURL, + }, + { + name: "region", + queue_url: "https://sqs.us-east-1.amazonaws.com/627959692251/test-s3-logs", + region: "us-west-2", + want: "us-west-2", + }, + { + name: "default_region", + queue_url: "https://sqs.us-east-1.amazonaws.com/627959692251/test-s3-logs", + default_region: "us-west-2", + want: "us-west-2", + }, + { + name: "region and default_region", + queue_url: "https://sqs.us-east-1.amazonaws.com/627959692251/test-s3-logs", + region: "us-east-2", + default_region: "us-east-3", + want: "us-east-2", + }, + { + name: "short_endpoint", + queue_url: "https://sqs.us-east-1.amazonaws.com/627959692251/test-s3-logs", + endpoint: "https://amazonaws.com", + want: "us-east-1", + }, + { + name: "long_endpoint", + queue_url: "https://sqs.us-east-1.amazonaws.com/627959692251/test-s3-logs", + endpoint: "https://s3.us-east-1.amazonaws.com", + want: "us-east-1", + }, + { + name: "region and custom short_endpoint", + queue_url: "https://sqs.us-east-1.amazonaws.com/627959692251/test-s3-logs", + region: "us-west-2", + endpoint: "https://.elastic.co", + want: "us-west-2", + }, + { + name: "region and custom long_endpoint", + queue_url: "https://sqs.us-east-1.amazonaws.com/627959692251/test-s3-logs", + region: "us-west-2", + endpoint: "https://s3.us-east-1.elastic.co", + want: "us-west-2", + }, + { + name: "region and short_endpoint", + queue_url: "https://sqs.us-east-1.amazonaws.com/627959692251/test-s3-logs", + region: "us-west-2", + endpoint: "https://amazonaws.com", + want: "us-west-2", + }, + { + name: "region and long_endpoint", + queue_url: "https://sqs.us-east-1.amazonaws.com/627959692251/test-s3-logs", + region: "us-west-2", + endpoint: "https://s3.us-east-1.amazonaws.com", + want: "us-west-2", + }, + { + name: "region and default region and short_endpoint", + queue_url: "https://sqs.us-east-1.amazonaws.com/627959692251/test-s3-logs", + region: "us-west-2", + default_region: "us-east-1", + endpoint: "https://amazonaws.com", + want: "us-west-2", + }, + { + name: "region and default region and long_endpoint", + queue_url: "https://sqs.us-east-1.amazonaws.com/627959692251/test-s3-logs", + region: "us-west-2", + default_region: "us-east-1", + endpoint: "https://s3.us-east-1.amazonaws.com", 
+ want: "us-west-2", + }, + } + + for _, test := range tests { + logp.TestingSetup() + + // Create a filebeat config using the provided test parameters + config := "" + if test.queue_url != "" { + config += fmt.Sprintf("queue_url: %s \n", test.queue_url) + } + if test.region != "" { + config += fmt.Sprintf("region: %s \n", test.region) + } + if test.default_region != "" { + config += fmt.Sprintf("default_region: %s \n", test.default_region) + } + if test.endpoint != "" { + config += fmt.Sprintf("endpoint: %s \n", test.endpoint) + } + + s3Input := createInput(t, conf.MustNewConfigFrom(config)) + + inputCtx, cancel := newV2Context() + t.Cleanup(cancel) + time.AfterFunc(5*time.Second, func() { + cancel() + }) + + var errGroup errgroup.Group + errGroup.Go(func() error { + return s3Input.Run(inputCtx, &fakePipeline{}) + }) + + if err := errGroup.Wait(); err != nil { + // assert that err == test.wantErr + if test.wantErr != nil { + continue + } + // Print the test name to help identify the failing test + t.Fatal(test.name, err) + } + + // If the endpoint starts with s3, the endpoint resolver should be null at this point + // If the endpoint does not start with s3, the endpointresolverwithoptions should be set + // If the endpoint is not set, the endpoint resolver should be null + if test.endpoint == "" { + assert.Nil(t, s3Input.awsConfig.EndpointResolver, test.name) + assert.Nil(t, s3Input.awsConfig.EndpointResolverWithOptions, test.name) + } else if strings.HasPrefix(test.endpoint, "https://s3") { + // S3 resolvers are added later in the code than this integration test covers + assert.Nil(t, s3Input.awsConfig.EndpointResolver, test.name) + assert.Nil(t, s3Input.awsConfig.EndpointResolverWithOptions, test.name) + } else { // If the endpoint is specified but is not s3 + assert.Nil(t, s3Input.awsConfig.EndpointResolver, test.name) + assert.NotNil(t, s3Input.awsConfig.EndpointResolverWithOptions, test.name) + } + + assert.EqualValues(t, test.want, s3Input.awsConfig.Region, test.name) + } +} + func TestInputRunSQS(t *testing.T) { logp.TestingSetup() diff --git a/x-pack/filebeat/input/awss3/input_test.go b/x-pack/filebeat/input/awss3/input_test.go index 432bd360bfc..4a2160e5800 100644 --- a/x-pack/filebeat/input/awss3/input_test.go +++ b/x-pack/filebeat/input/awss3/input_test.go @@ -88,8 +88,20 @@ func TestRegionSelection(t *testing.T) { want: "us-west-3", }, { - name: "abc.xyz_and_domain_with_blank_endpoint", + name: "abc.xyz_and_domain_with_matching_endpoint_and_scheme", queueURL: "https://sqs.us-east-1.abc.xyz/627959692251/test-s3-logs", + endpoint: "https://abc.xyz", + want: "us-east-1", + }, + { + name: "abc.xyz_and_domain_with_matching_url_endpoint", + queueURL: "https://sqs.us-east-1.abc.xyz/627959692251/test-s3-logs", + endpoint: "https://s3.us-east-1.abc.xyz", + want: "us-east-1", + }, + { + name: "abc.xyz_and_no_region_term", + queueURL: "https://sqs.abc.xyz/627959692251/test-s3-logs", wantErr: errBadQueueURL, }, { @@ -118,7 +130,7 @@ func TestRegionSelection(t *testing.T) { { name: "non_aws_vpce_without_endpoint", queueURL: "https://vpce-test.sqs.us-east-1.vpce.abc.xyz/12345678912/sqs-queue", - wantErr: errBadQueueURL, + want: "us-east-1", }, { name: "non_aws_vpce_with_region_override", diff --git a/x-pack/filebeat/input/awss3/s3.go b/x-pack/filebeat/input/awss3/s3.go index 9901d5fe41d..a4865022850 100644 --- a/x-pack/filebeat/input/awss3/s3.go +++ b/x-pack/filebeat/input/awss3/s3.go @@ -110,12 +110,3 @@ func getProviderFromDomain(endpoint string, ProviderOverride string) string { } return 
"unknown" } - -type nonAWSBucketResolver struct { - endpoint string -} - -func (n nonAWSBucketResolver) ResolveEndpoint(region string, options s3.EndpointResolverOptions) (awssdk.Endpoint, error) { - //nolint:staticcheck // haven't migrated to the new interface yet - return awssdk.Endpoint{URL: n.endpoint, SigningRegion: region, HostnameImmutable: true, Source: awssdk.EndpointSourceCustom}, nil -} diff --git a/x-pack/filebeat/input/awss3/sqs.go b/x-pack/filebeat/input/awss3/sqs.go index 36985f73720..b41468d2ac9 100644 --- a/x-pack/filebeat/input/awss3/sqs.go +++ b/x-pack/filebeat/input/awss3/sqs.go @@ -31,29 +31,30 @@ const ( var errBadQueueURL = errors.New("QueueURL is not in format: https://sqs.{REGION_ENDPOINT}.{ENDPOINT}/{ACCOUNT_NUMBER}/{QUEUE_NAME} or https://{VPC_ENDPOINT}.sqs.{REGION_ENDPOINT}.vpce.{ENDPOINT}/{ACCOUNT_NUMBER}/{QUEUE_NAME}") -func getRegionFromQueueURL(queueURL, endpoint string) string { +func getRegionFromQueueURL(queueURL string) string { // get region from queueURL + // Example for custom domain queue: https://sqs.us-east-1.abc.xyz/12345678912/test-s3-logs // Example for sqs queue: https://sqs.us-east-1.amazonaws.com/12345678912/test-s3-logs // Example for vpce: https://vpce-test.sqs.us-east-1.vpce.amazonaws.com/12345678912/sqs-queue + // We use a simple heuristic that works for all essential cases: + // - If queue hostname is sqs.X.*, return region X + // - If queue hostname is X.sqs.Y.*, return region Y + // Hosts that don't follow this convention need the input config to + // specify a custom endpoint and an explicit region. u, err := url.Parse(queueURL) if err != nil { return "" } + hostSplit := strings.SplitN(u.Hostname(), ".", 5) - // check for sqs queue url - host := strings.SplitN(u.Host, ".", 3) - if len(host) == 3 && host[0] == "sqs" { - if host[2] == endpoint || (endpoint == "" && strings.HasPrefix(host[2], "amazonaws.")) { - return host[1] - } + // check for sqs-style queue url + if len(hostSplit) >= 4 && hostSplit[0] == "sqs" { + return hostSplit[1] } - // check for vpce url - host = strings.SplitN(u.Host, ".", 5) - if len(host) == 5 && host[1] == "sqs" { - if host[4] == endpoint || (endpoint == "" && strings.HasPrefix(host[4], "amazonaws.")) { - return host[2] - } + // check for vpce-style url + if len(hostSplit) == 5 && hostSplit[1] == "sqs" { + return hostSplit[2] } return "" diff --git a/x-pack/filebeat/input/awss3/sqs_input.go b/x-pack/filebeat/input/awss3/sqs_input.go index a4308af45a8..596586c7569 100644 --- a/x-pack/filebeat/input/awss3/sqs_input.go +++ b/x-pack/filebeat/input/awss3/sqs_input.go @@ -88,14 +88,19 @@ func (in *sqsReaderInput) setup( in.log = inputContext.Logger.With("queue_url", in.config.QueueURL) in.pipeline = pipeline - in.detectedRegion = getRegionFromQueueURL(in.config.QueueURL, in.config.AWSConfig.Endpoint) + in.detectedRegion = getRegionFromQueueURL(in.config.QueueURL) if in.config.RegionName != "" { + // Configured region always takes precedence in.awsConfig.Region = in.config.RegionName } else if in.detectedRegion != "" { + // Only use detected region if there is no explicit region configured. in.awsConfig.Region = in.detectedRegion + } else if in.config.AWSConfig.DefaultRegion != "" { + // If we can't find anything else, fall back on the default. + in.awsConfig.Region = in.config.AWSConfig.DefaultRegion } else { - // If we can't get a region from the config or the URL, return an error. 
- return fmt.Errorf("failed to get AWS region from queue_url: %w", errBadQueueURL) + // If we can't find a usable region, return an error + return fmt.Errorf("region not specified and failed to get AWS region from queue_url: %w", errBadQueueURL) } in.sqs = &awsSQSAPI{ diff --git a/x-pack/filebeat/input/cel/input_test.go b/x-pack/filebeat/input/cel/input_test.go index 1667fe7c282..9e4fe746d76 100644 --- a/x-pack/filebeat/input/cel/input_test.go +++ b/x-pack/filebeat/input/cel/input_test.go @@ -10,7 +10,6 @@ import ( "flag" "fmt" "io" - "math/rand" "net/http" "net/http/httptest" "net/url" @@ -1947,7 +1946,8 @@ func retryHandler() http.HandlerFunc { w.Write([]byte(`{"hello":"world"}`)) return } - w.WriteHeader(rand.Intn(100) + 500) + // Any 5xx except 501 will result in a retry. + w.WriteHeader(500) count++ } } diff --git a/x-pack/filebeat/input/entityanalytics/provider/activedirectory/activedirectory.go b/x-pack/filebeat/input/entityanalytics/provider/activedirectory/activedirectory.go index ab1a37cbced..c66004807b4 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/activedirectory/activedirectory.go +++ b/x-pack/filebeat/input/entityanalytics/provider/activedirectory/activedirectory.go @@ -130,6 +130,10 @@ func (p *adInput) Run(inputCtx v2.Context, store *kvstore.Store, client beat.Cli p.cfg.UserAttrs = withMandatory(p.cfg.UserAttrs, "distinguishedName", "whenChanged") p.cfg.GrpAttrs = withMandatory(p.cfg.GrpAttrs, "distinguishedName", "whenChanged") + var ( + last time.Time + err error + ) for { select { case <-inputCtx.Cancelation.Done(): @@ -137,9 +141,9 @@ func (p *adInput) Run(inputCtx v2.Context, store *kvstore.Store, client beat.Cli return inputCtx.Cancelation.Err() } return nil - case <-syncTimer.C: - start := time.Now() - if err := p.runFullSync(inputCtx, store, client); err != nil { + case start := <-syncTimer.C: + last, err = p.runFullSync(inputCtx, store, client) + if err != nil { p.logger.Errorw("Error running full sync", "error", err) p.metrics.syncError.Inc() } @@ -157,9 +161,9 @@ func (p *adInput) Run(inputCtx v2.Context, store *kvstore.Store, client beat.Cli } updateTimer.Reset(p.cfg.UpdateInterval) p.logger.Debugf("Next update expected at: %v", time.Now().Add(p.cfg.UpdateInterval)) - case <-updateTimer.C: - start := time.Now() - if err := p.runIncrementalUpdate(inputCtx, store, client); err != nil { + case start := <-updateTimer.C: + last, err = p.runIncrementalUpdate(inputCtx, store, last, client) + if err != nil { p.logger.Errorw("Error running incremental update", "error", err) p.metrics.updateError.Inc() } @@ -192,13 +196,13 @@ outer: // identities from Azure Active Directory, enrich users with group memberships, // and publishes all known users (regardless if they have been modified) to the // given beat.Client. -func (p *adInput) runFullSync(inputCtx v2.Context, store *kvstore.Store, client beat.Client) error { +func (p *adInput) runFullSync(inputCtx v2.Context, store *kvstore.Store, client beat.Client) (time.Time, error) { p.logger.Debugf("Running full sync...") p.logger.Debugf("Opening new transaction...") state, err := newStateStore(store) if err != nil { - return fmt.Errorf("unable to begin transaction: %w", err) + return time.Time{}, fmt.Errorf("unable to begin transaction: %w", err) } p.logger.Debugf("Transaction opened") defer func() { // If commit is successful, call to this close will be no-op. 
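The runFullSync hunk that follows moves deletion detection out of the incremental path: users present in the state store but absent from the latest fetch are republished as Deleted. A condensed sketch of that pass, with User and the store map as simplified stand-ins for the input's real types:

```go
package main

import (
	"fmt"
	"time"
)

// Simplified stand-ins for the provider's User type and state store.
type UserState string

const Deleted UserState = "DELETED"

type User struct {
	ID          string
	State       UserState
	WhenChanged time.Time
}

// markAbsentAsDeleted mimics the pass added to runFullSync: Active
// Directory has no tombstones, so any stored user missing from the
// current fetch is marked Deleted, with now as the best-effort
// deletion time, and appended to the publish set.
func markAbsentAsDeleted(fetched []*User, store map[string]*User) []*User {
	found := make(map[string]bool, len(fetched))
	for _, u := range fetched {
		found[u.ID] = true
	}
	now := time.Now()
	for _, u := range store {
		if u.State == Deleted || found[u.ID] {
			// Already tombstoned (no need to republish) or still present.
			continue
		}
		u.State = Deleted // mutates the store's copy, as in the real code
		u.WhenChanged = now
		fetched = append(fetched, u)
	}
	return fetched
}

func main() {
	store := map[string]*User{
		"a": {ID: "a"},
		"b": {ID: "b"},
	}
	out := markAbsentAsDeleted([]*User{{ID: "a"}}, store)
	fmt.Println(len(out), store["b"].State) // 2 DELETED
}
```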
@@ -210,48 +214,89 @@ func (p *adInput) runFullSync(inputCtx v2.Context, store *kvstore.Store, client ctx := ctxtool.FromCanceller(inputCtx.Cancelation) p.logger.Debugf("Starting fetch...") - _, err = p.doFetchUsers(ctx, state, true) + users, err := p.doFetchUsers(ctx, state, true) if err != nil { - return err + return time.Time{}, err } - if len(state.users) != 0 { - tracker := kvstore.NewTxTracker(ctx) - - start := time.Now() - p.publishMarker(start, start, inputCtx.ID, true, client, tracker) - for _, u := range state.users { - p.publishUser(u, state, inputCtx.ID, client, tracker) + if len(users) != 0 || state.len() != 0 { + // Active Directory does not have a notion of deleted users + // beyond absence from the directory, so compare found users + // with users already known by the state store and if any + // are in the store but not returned in the previous fetch, + // mark them as deleted and publish the deletion. We do not + // have the time of the deletion, so use now. + if state.len() != 0 { + found := make(map[string]bool) + for _, u := range users { + found[u.ID] = true + } + deleted := make(map[string]*User) + now := time.Now() + state.forEach(func(u *User) { + if u.State == Deleted { + // We have already seen that this is deleted + // so we do not need to publish again. The + // user will be deleted from the store when + // the state is closed. + return + } + if found[u.ID] { + // We have the user, so we do not need to + // mark it as deleted. + return + } + // This modifies the state store's copy since u + // is a pointer held by the state store map. + u.State = Deleted + u.WhenChanged = now + deleted[u.ID] = u + }) + for _, u := range deleted { + users = append(users, u) + } + } + if len(users) != 0 { + var tracker *kvstore.TxTracker + start := time.Now() + p.publishMarker(start, start, inputCtx.ID, true, client, tracker) + tracker = kvstore.NewTxTracker(ctx) + for _, u := range users { + p.publishUser(u, state, inputCtx.ID, client, tracker) + } + end := time.Now() + p.publishMarker(end, end, inputCtx.ID, false, client, tracker) + tracker.Wait() } - - end := time.Now() - p.publishMarker(end, end, inputCtx.ID, false, client, tracker) - - tracker.Wait() } if ctx.Err() != nil { - return ctx.Err() + return time.Time{}, ctx.Err() } - state.lastSync = time.Now() + // state.whenChanged is modified by the call to doFetchUsers to be + // the latest modification time for all of the users that have been + // collected in that call. This will not include any of the deleted + // users since they were not collected. + latest := state.whenChanged + state.lastSync = latest err = state.close(true) if err != nil { - return fmt.Errorf("unable to commit state: %w", err) + return time.Time{}, fmt.Errorf("unable to commit state: %w", err) } - return nil + return latest, nil } // runIncrementalUpdate will run an incremental update. The process is similar // to full synchronization, except only users which have changed (newly // discovered, modified, or deleted) will be published. 
-func (p *adInput) runIncrementalUpdate(inputCtx v2.Context, store *kvstore.Store, client beat.Client) error { +func (p *adInput) runIncrementalUpdate(inputCtx v2.Context, store *kvstore.Store, last time.Time, client beat.Client) (time.Time, error) { p.logger.Debugf("Running incremental update...") state, err := newStateStore(store) if err != nil { - return fmt.Errorf("unable to begin transaction: %w", err) + return last, fmt.Errorf("unable to begin transaction: %w", err) } defer func() { // If commit is successful, call to this close will be no-op. closeErr := state.close(false) @@ -263,62 +308,37 @@ func (p *adInput) runIncrementalUpdate(inputCtx v2.Context, store *kvstore.Store ctx := ctxtool.FromCanceller(inputCtx.Cancelation) updatedUsers, err := p.doFetchUsers(ctx, state, false) if err != nil { - return err + return last, err } - var tracker *kvstore.TxTracker - if len(updatedUsers) != 0 || state.len() != 0 { - // Active Directory does not have a notion of deleted users - // beyond absence from the directory, so compare found users - // with users already known by the state store and if any - // are in the store but not returned in the previous fetch, - // mark them as deleted and publish the deletion. We do not - // have the time of the deletion, so use now. - if state.len() != 0 { - found := make(map[string]bool) - for _, u := range updatedUsers { - found[u.ID] = true - } - deleted := make(map[string]*User) - now := time.Now() - state.forEach(func(u *User) { - if u.State == Deleted || found[u.ID] { - return - } - // This modifies the state store's copy since u - // is a pointer held by the state store map. - u.State = Deleted - u.WhenChanged = now - deleted[u.ID] = u - }) - for _, u := range deleted { - updatedUsers = append(updatedUsers, u) - } - } - if len(updatedUsers) != 0 { - tracker = kvstore.NewTxTracker(ctx) - for _, u := range updatedUsers { - p.publishUser(u, state, inputCtx.ID, client, tracker) - } - tracker.Wait() + if len(updatedUsers) != 0 { + tracker := kvstore.NewTxTracker(ctx) + for _, u := range updatedUsers { + p.publishUser(u, state, inputCtx.ID, client, tracker) } + tracker.Wait() } if ctx.Err() != nil { - return ctx.Err() + return last, ctx.Err() } - state.lastUpdate = time.Now() + // state.whenChanged is modified by the call to doFetchUsers to be + // the latest modification time for all of the users that have been + // collected in that call. + latest := state.whenChanged + state.lastUpdate = latest if err = state.close(true); err != nil { - return fmt.Errorf("unable to commit state: %w", err) + return last, fmt.Errorf("unable to commit state: %w", err) } - return nil + return latest, nil } // doFetchUsers handles fetching user identities from Active Directory. If // fullSync is true, then any existing whenChanged will be ignored, forcing a -// full synchronization from Active Directory. +// full synchronization from Active Directory. The whenChanged time of state +// is modified to be the time stamp of the latest User.WhenChanged value. // Returns a set of modified users by ID. 
func (p *adInput) doFetchUsers(ctx context.Context, state *stateStore, fullSync bool) ([]*User, error) { var since time.Time @@ -332,31 +352,14 @@ func (p *adInput) doFetchUsers(ctx context.Context, state *stateStore, fullSync return nil, err } - var ( - users []*User - whenChanged time.Time - ) - if fullSync { - for _, u := range entries { - state.storeUser(u) - if u.WhenChanged.After(whenChanged) { - whenChanged = u.WhenChanged - } + users := make([]*User, 0, len(entries)) + for _, u := range entries { + users = append(users, state.storeUser(u)) + if u.WhenChanged.After(state.whenChanged) { + state.whenChanged = u.WhenChanged } - } else { - users = make([]*User, 0, len(entries)) - for _, u := range entries { - users = append(users, state.storeUser(u)) - if u.WhenChanged.After(whenChanged) { - whenChanged = u.WhenChanged - } - } - p.logger.Debugf("processed %d users from API", len(users)) } - if whenChanged.After(state.whenChanged) { - state.whenChanged = whenChanged - } - + p.logger.Debugf("processed %d users from API", len(users)) return users, nil } diff --git a/x-pack/filebeat/input/entityanalytics/provider/activedirectory/statestore.go b/x-pack/filebeat/input/entityanalytics/provider/activedirectory/statestore.go index 74486ebaac6..c81ece21a30 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/activedirectory/statestore.go +++ b/x-pack/filebeat/input/entityanalytics/provider/activedirectory/statestore.go @@ -170,6 +170,13 @@ func (s *stateStore) close(commit bool) (err error) { } for key, value := range s.users { + if value.State == Deleted { + err = s.tx.Delete(usersBucket, []byte(key)) + if err != nil { + return fmt.Errorf("unable to delete user %q from state: %w", key, err) + } + continue + } err = s.tx.Set(usersBucket, []byte(key), value) if err != nil { return fmt.Errorf("unable to save user %q to state: %w", key, err) diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2.go index 1e134842dcc..f4c38fc909c 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2.go @@ -74,7 +74,7 @@ func (a *oauth2) renewToken(ctx context.Context) error { reqValues := url.Values{ "client_id": []string{a.conf.ClientID}, "scope": a.conf.Scopes, - "client_secret": []string{url.QueryEscape(a.conf.Secret)}, + "client_secret": []string{a.conf.Secret}, "grant_type": []string{"client_credentials"}, } reqEncoded := reqValues.Encode() diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2_test.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2_test.go index 1ec9d7dad45..1d4da19292e 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2_test.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/authenticator/oauth2/oauth2_test.go @@ -18,7 +18,7 @@ import ( "github.com/elastic/elastic-agent-libs/logp" ) -func testSetupServer(t *testing.T, tokenValue string, expiresIn int) *httptest.Server { +func testSetupServer(t *testing.T, expectedClientSecret string, tokenValue string, expiresIn int) *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { payload := authResponse{ TokenType: "Bearer", @@ -28,6 +28,7 @@ func testSetupServer(t *testing.T, tokenValue string, 
expiresIn int) *httptest.S } data, err := json.Marshal(payload) require.NoError(t, err) + require.Equal(t, expectedClientSecret, r.FormValue("client_secret")) _, err = w.Write(data) require.NoError(t, err) @@ -62,12 +63,13 @@ func TestRenew(t *testing.T) { value := "test-value" expiresIn := 1000 - srv := testSetupServer(t, value, expiresIn) + clientSecret := "value&chars=to|escape" // #nosec G101 + srv := testSetupServer(t, clientSecret, value, expiresIn) defer srv.Close() cfg, err := config.NewConfigFrom(&conf{ Endpoint: "http://" + srv.Listener.Addr().String(), - Secret: "value", + Secret: clientSecret, ClientID: "client-id", TenantID: "tenant-id", }) @@ -90,7 +92,7 @@ func TestRenew(t *testing.T) { cachedToken := "cached-value" expireTime := time.Now().Add(1000 * time.Second) - srv := testSetupServer(t, cachedToken, 1000) + srv := testSetupServer(t, "no-client-secret-used", cachedToken, 1000) defer srv.Close() cfg, err := config.NewConfigFrom(&conf{ diff --git a/x-pack/filebeat/input/entityanalytics/provider/jamf/internal/jamf/jamf.go b/x-pack/filebeat/input/entityanalytics/provider/jamf/internal/jamf/jamf.go index d30562ca567..98f98499882 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/jamf/internal/jamf/jamf.go +++ b/x-pack/filebeat/input/entityanalytics/provider/jamf/internal/jamf/jamf.go @@ -205,9 +205,6 @@ func GetComputers(ctx context.Context, cli *http.Client, tenant string, tok Toke // GetUsers returns Jamf users using the list users API endpoint. tenant is the // Jamf user domain and key is the API token to use for the query. If user is not empty, // details for the specific user are returned, otherwise a list of all users is returned. -// The query parameter holds queries as described in https://developer.Jamf.com/docs/reference/user-query/ -// with the query syntax described at https://developer.Jamf.com/docs/reference/core-Jamf-api/#filter. -// Parts of the response may be omitted using the omit parameter. // // See https://developer.jamf.com/jamf-pro/reference/findusers for details. func GetUsers(ctx context.Context, cli *http.Client, tenant string, tok Token, query url.Values) ([]User, error) { diff --git a/x-pack/filebeat/input/entityanalytics/provider/okta/conf.go b/x-pack/filebeat/input/entityanalytics/provider/okta/conf.go index 2bab4c9e67d..41a3895a70d 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/okta/conf.go +++ b/x-pack/filebeat/input/entityanalytics/provider/okta/conf.go @@ -23,6 +23,7 @@ func defaultConfig() conf { transport.Timeout = 30 * time.Second return conf{ + EnrichWith: []string{"groups"}, SyncInterval: 24 * time.Hour, UpdateInterval: 15 * time.Minute, LimitWindow: time.Minute, @@ -48,6 +49,12 @@ type conf struct { // the API. It can be ""/"all", "users", or // "devices". Dataset string `config:"dataset"` + // EnrichWith specifies the additional data that + // will be used to enrich user data. It can include + // "groups", "roles" and "factors". + // If it is a single element with "none", no + // enrichment is performed. + EnrichWith []string `config:"enrich_with"` // SyncInterval is the time between full // synchronisation operations. 
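For illustration, a minimal sketch of how the new enrich_with option might look in an entity analytics input configuration. Only enrich_with and its values ("groups", "roles", "factors", or a single "none" to disable enrichment, with ["groups"] as the default) come from this change; the surrounding keys are assumptions based on the provider's existing documented options:

    filebeat.inputs:
      - type: entity-analytics
        id: entity-analytics-okta
        provider: okta
        dataset: all
        enrich_with: ["groups", "roles", "factors"]
        sync_interval: "24h"
        update_interval: "15m"
        okta_domain: "${OKTA_DOMAIN}"
        okta_token: "${OKTA_TOKEN}"
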
diff --git a/x-pack/filebeat/input/entityanalytics/provider/okta/okta.go b/x-pack/filebeat/input/entityanalytics/provider/okta/okta.go index 404f069a7ec..5d68cf3f5c4 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/okta/okta.go +++ b/x-pack/filebeat/input/entityanalytics/provider/okta/okta.go @@ -15,6 +15,7 @@ import ( "net/url" "os" "path/filepath" + "slices" "strings" "time" @@ -460,7 +461,7 @@ func (p *oktaInput) doFetchUsers(ctx context.Context, state *stateStore, fullSyn if fullSync { for _, u := range batch { - p.addGroup(ctx, u, state) + p.addUserMetadata(ctx, u, state) if u.LastUpdated.After(lastUpdated) { lastUpdated = u.LastUpdated } @@ -468,7 +469,7 @@ func (p *oktaInput) doFetchUsers(ctx context.Context, state *stateStore, fullSyn } else { users = grow(users, len(batch)) for _, u := range batch { - su := p.addGroup(ctx, u, state) + su := p.addUserMetadata(ctx, u, state) users = append(users, su) if u.LastUpdated.After(lastUpdated) { lastUpdated = u.LastUpdated @@ -500,14 +501,41 @@ func (p *oktaInput) doFetchUsers(ctx context.Context, state *stateStore, fullSyn return users, nil } -func (p *oktaInput) addGroup(ctx context.Context, u okta.User, state *stateStore) *User { +func (p *oktaInput) addUserMetadata(ctx context.Context, u okta.User, state *stateStore) *User { su := state.storeUser(u) - groups, _, err := okta.GetUserGroupDetails(ctx, p.client, p.cfg.OktaDomain, p.cfg.OktaToken, u.ID, p.lim, p.cfg.LimitWindow, p.logger) - if err != nil { - p.logger.Warnf("failed to get user group membership for %s: %v", u.ID, err) + switch len(p.cfg.EnrichWith) { + case 1: + if p.cfg.EnrichWith[0] != "none" { + break + } + fallthrough + case 0: return su } - su.Groups = groups + if slices.Contains(p.cfg.EnrichWith, "groups") { + groups, _, err := okta.GetUserGroupDetails(ctx, p.client, p.cfg.OktaDomain, p.cfg.OktaToken, u.ID, p.lim, p.cfg.LimitWindow, p.logger) + if err != nil { + p.logger.Warnf("failed to get user group membership for %s: %v", u.ID, err) + } else { + su.Groups = groups + } + } + if slices.Contains(p.cfg.EnrichWith, "factors") { + factors, _, err := okta.GetUserFactors(ctx, p.client, p.cfg.OktaDomain, p.cfg.OktaToken, u.ID, p.lim, p.cfg.LimitWindow, p.logger) + if err != nil { + p.logger.Warnf("failed to get user factors for %s: %v", u.ID, err) + } else { + su.Factors = factors + } + } + if slices.Contains(p.cfg.EnrichWith, "roles") { + roles, _, err := okta.GetUserRoles(ctx, p.client, p.cfg.OktaDomain, p.cfg.OktaToken, u.ID, p.lim, p.cfg.LimitWindow, p.logger) + if err != nil { + p.logger.Warnf("failed to get user roles for %s: %v", u.ID, err) + } else { + su.Roles = roles + } + } return su } diff --git a/x-pack/filebeat/input/entityanalytics/provider/okta/okta_test.go b/x-pack/filebeat/input/entityanalytics/provider/okta/okta_test.go index c13cf4040c5..5752370c4ce 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/okta/okta_test.go +++ b/x-pack/filebeat/input/entityanalytics/provider/okta/okta_test.go @@ -13,6 +13,7 @@ import ( "net/http/httptest" "net/url" "path" + "slices" "strings" "testing" "time" @@ -31,13 +32,14 @@ func TestOktaDoFetch(t *testing.T) { tests := []struct { dataset string + enrichWith []string wantUsers bool wantDevices bool }{ - {dataset: "", wantUsers: true, wantDevices: true}, - {dataset: "all", wantUsers: true, wantDevices: true}, - {dataset: "users", wantUsers: true, wantDevices: false}, - {dataset: "devices", wantUsers: false, wantDevices: true}, + {dataset: "", enrichWith: []string{"groups"}, wantUsers: true, wantDevices: 
true}, + {dataset: "all", enrichWith: []string{"groups"}, wantUsers: true, wantDevices: true}, + {dataset: "users", enrichWith: []string{"groups", "roles", "factors"}, wantUsers: true, wantDevices: false}, + {dataset: "devices", enrichWith: []string{"groups"}, wantUsers: false, wantDevices: true}, } for _, test := range tests { @@ -56,14 +58,18 @@ func TestOktaDoFetch(t *testing.T) { window = time.Minute key = "token" users = `[{"id":"USERID","status":"STATUS","created":"2023-05-14T13:37:20.000Z","activated":null,"statusChanged":"2023-05-15T01:50:30.000Z","lastLogin":"2023-05-15T01:59:20.000Z","lastUpdated":"2023-05-15T01:50:32.000Z","passwordChanged":"2023-05-15T01:50:32.000Z","type":{"id":"typeid"},"profile":{"firstName":"name","lastName":"surname","mobilePhone":null,"secondEmail":null,"login":"name.surname@example.com","email":"name.surname@example.com"},"credentials":{"password":{"value":"secret"},"emails":[{"value":"name.surname@example.com","status":"VERIFIED","type":"PRIMARY"}],"provider":{"type":"OKTA","name":"OKTA"}},"_links":{"self":{"href":"https://localhost/api/v1/users/USERID"}}}]` + roles = `[{"id":"IFIFAX2BIRGUSTQ","label":"Application administrator","type":"APP_ADMIN","status":"ACTIVE","created":"2019-02-06T16:17:40.000Z","lastUpdated":"2019-02-06T16:17:40.000Z","assignmentType":"USER"},{"id":"JBCUYUC7IRCVGS27IFCE2SKO","label":"Help Desk administrator","type":"HELP_DESK_ADMIN","status":"ACTIVE","created":"2019-02-06T16:17:40.000Z","lastUpdated":"2019-02-06T16:17:40.000Z","assignmentType":"USER"},{"id":"ra125eqBFpETrMwu80g4","label":"Organization administrator","type":"ORG_ADMIN","status":"ACTIVE","created":"2019-02-06T16:17:40.000Z","lastUpdated":"2019-02-06T16:17:40.000Z","assignmentType":"USER"},{"id":"gra25fapn1prGTBKV0g4","label":"API Access Management administrator","type":"API_ACCESS_MANAGEMENT_ADMIN","status":"ACTIVE","created\"":"2019-02-06T16:20:57.000Z","lastUpdated\"":"2019-02-06T16:20:57.000Z","assignmentType\"":"GROUP"}]` groups = `[{"id":"USERID","profile":{"description":"All users in your organization","name":"Everyone"}}]` + factors = `[{"id":"ufs2bysphxKODSZKWVCT","factorType":"question","provider":"OKTA","vendorName":"OKTA","status":"ACTIVE","created":"2014-04-15T18:10:06.000Z","lastUpdated":"2014-04-15T18:10:06.000Z","profile":{"question":"favorite_art_piece","questionText":"What is your favorite piece of art?"}},{"id":"ostf2gsyictRQDSGTDZE","factorType":"token:software:totp","provider":"OKTA","status":"PENDING_ACTIVATION","created":"2014-06-27T20:27:33.000Z","lastUpdated":"2014-06-27T20:27:33.000Z","profile":{"credentialId":"dade.murphy@example.com"}},{"id":"sms2gt8gzgEBPUWBIFHN","factorType":"sms","provider":"OKTA","status":"ACTIVE","created":"2014-06-27T20:27:26.000Z","lastUpdated":"2014-06-27T20:27:26.000Z","profile":{"phoneNumber":"+1-555-415-1337"}}]` devices = `[{"id":"DEVICEID","status":"STATUS","created":"2019-10-02T18:03:07.000Z","lastUpdated":"2019-10-02T18:03:07.000Z","profile":{"displayName":"Example Device name 1","platform":"WINDOWS","serialNumber":"XXDDRFCFRGF3M8MD6D","sid":"S-1-11-111","registered":true,"secureHardwarePresent":false,"diskEncryptionType":"ALL_INTERNAL_VOLUMES"},"resourceType":"UDDevice","resourceDisplayName":{"value":"Example Device name 
1","sensitive":false},"resourceAlternateId":null,"resourceId":"DEVICEID","_links":{"activate":{"href":"https://localhost/api/v1/devices/DEVICEID/lifecycle/activate","hints":{"allow":["POST"]}},"self":{"href":"https://localhost/api/v1/devices/DEVICEID","hints":{"allow":["GET","PATCH","PUT"]}},"users":{"href":"https://localhost/api/v1/devices/DEVICEID/users","hints":{"allow":["GET"]}}}}]` ) data := map[string]string{ "users": users, + "roles": roles, "groups": groups, "devices": devices, + "factors": factors, } var wantUsers []User @@ -88,29 +94,50 @@ func TestOktaDoFetch(t *testing.T) { t.Fatalf("failed to unmarshal device data: %v", err) } } + var wantFactors []okta.Factor + if slices.Contains(test.enrichWith, "factors") { + err := json.Unmarshal([]byte(factors), &wantFactors) + if err != nil { + t.Fatalf("failed to unmarshal factor data: %v", err) + } + } + var wantRoles []okta.Role + if slices.Contains(test.enrichWith, "roles") { + err := json.Unmarshal([]byte(roles), &wantRoles) + if err != nil { + t.Fatalf("failed to unmarshal role data: %v", err) + } + } wantStates := make(map[string]State) // Set the number of repeats. const repeats = 3 var n int - ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + setHeaders := func(w http.ResponseWriter) { // Leave 49 remaining, reset in one minute. w.Header().Add("x-rate-limit-limit", "50") w.Header().Add("x-rate-limit-remaining", "49") w.Header().Add("x-rate-limit-reset", fmt.Sprint(time.Now().Add(time.Minute).Unix())) - - if strings.HasPrefix(r.URL.Path, "/api/v1/users") && strings.HasSuffix(r.URL.Path, "groups") { - // Give the groups if this is a get user groups request. - userid := strings.TrimSuffix(strings.TrimPrefix(r.URL.Path, "/api/v1/users/"), "/groups") - fmt.Fprintln(w, strings.ReplaceAll(data["groups"], "USERID", userid)) - return - } - if strings.HasPrefix(r.URL.Path, "/api/v1/device") && strings.HasSuffix(r.URL.Path, "users") { - // Give one user if this is a get device users request. - fmt.Fprintln(w, data["users"]) + } + mux := http.NewServeMux() + mux.Handle("/api/v1/users/{userid}/{metadata}", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + setHeaders(w) + attr := r.PathValue("metadata") + if attr != "groups" { + fmt.Fprintln(w, data[attr]) return } + // Give the groups if this is a get user groups request. 
+ userid := r.PathValue("userid") + fmt.Fprintln(w, strings.ReplaceAll(data[attr], "USERID", userid)) + })) + mux.Handle("/api/v1/devices/{deviceid}/users", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + setHeaders(w) + fmt.Fprintln(w, data["users"]) + })) + mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + setHeaders(w) base := path.Base(r.URL.Path) @@ -143,6 +170,7 @@ func TestOktaDoFetch(t *testing.T) { ) fmt.Fprintln(w, replacer.Replace(data[base])) })) + ts := httptest.NewTLSServer(mux) defer ts.Close() u, err := url.Parse(ts.URL) @@ -154,6 +182,7 @@ func TestOktaDoFetch(t *testing.T) { OktaDomain: u.Host, OktaToken: key, Dataset: test.dataset, + EnrichWith: test.enrichWith, }, client: ts.Client(), lim: rate.NewLimiter(1, 1), @@ -196,6 +225,12 @@ func TestOktaDoFetch(t *testing.T) { if g.ID != wantID { t.Errorf("unexpected user ID for user %d: got:%s want:%s", i, g.ID, wantID) } + if len(g.Factors) != len(wantFactors) { + t.Errorf("number of factors for user %d: got:%d want:%d", i, len(g.Factors), len(wantFactors)) + } + if len(g.Roles) != len(wantRoles) { + t.Errorf("number of roles for user %d: got:%d want:%d", i, len(g.Roles), len(wantRoles)) + } for j, gg := range g.Groups { if gg.ID != wantID { t.Errorf("unexpected used ID for user group %d in %d: got:%s want:%s", j, i, gg.ID, wantID) diff --git a/x-pack/filebeat/input/entityanalytics/provider/okta/statestore.go b/x-pack/filebeat/input/entityanalytics/provider/okta/statestore.go index 401b3353d14..4e9254e56b8 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/okta/statestore.go +++ b/x-pack/filebeat/input/entityanalytics/provider/okta/statestore.go @@ -37,8 +37,10 @@ const ( type User struct { okta.User `json:"properties"` - Groups []okta.Group `json:"groups"` - State State `json:"state"` + Groups []okta.Group `json:"groups"` + Roles []okta.Role `json:"roles"` + Factors []okta.Factor `json:"factors"` + State State `json:"state"` } type Device struct { diff --git a/x-pack/filebeat/input/etw/input.go b/x-pack/filebeat/input/etw/input.go index f030ada04e0..b41e7347a3e 100644 --- a/x-pack/filebeat/input/etw/input.go +++ b/x-pack/filebeat/input/etw/input.go @@ -79,7 +79,7 @@ type etwInput struct { func Plugin() input.Plugin { return input.Plugin{ Name: inputName, - Stability: feature.Beta, + Stability: feature.Stable, Info: "Collect ETW logs.", Manager: stateless.NewInputManager(configure), } diff --git a/x-pack/filebeat/input/http_endpoint/handler.go b/x-pack/filebeat/input/http_endpoint/handler.go index 67a1e07af86..27f4d12253e 100644 --- a/x-pack/filebeat/input/http_endpoint/handler.go +++ b/x-pack/filebeat/input/http_endpoint/handler.go @@ -119,7 +119,7 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { r.Body = io.NopCloser(&buf) } - objs, _, status, err := httpReadJSON(body, h.program) + objs, status, err := httpReadJSON(body, h.program) if err != nil { h.sendAPIErrorResponse(txID, w, r, h.log, status, err) h.metrics.apiErrors.Add(1) @@ -333,18 +333,18 @@ func (h *handler) publishEvent(obj, headers mapstr.M, acker *batchACKTracker) er return nil } -func httpReadJSON(body io.Reader, prg *program) (objs []mapstr.M, rawMessages []json.RawMessage, status int, err error) { +func httpReadJSON(body io.Reader, prg *program) (objs []mapstr.M, status int, err error) { if body == http.NoBody { - return nil, nil, http.StatusNotAcceptable, errBodyEmpty + return nil, http.StatusNotAcceptable, errBodyEmpty } - obj, rawMessage, err := decodeJSON(body, prg) + obj, 
err := decodeJSON(body, prg) if err != nil { - return nil, nil, http.StatusBadRequest, err + return nil, http.StatusBadRequest, err } - return obj, rawMessage, http.StatusOK, err + return obj, http.StatusOK, err } -func decodeJSON(body io.Reader, prg *program) (objs []mapstr.M, rawMessages []json.RawMessage, err error) { +func decodeJSON(body io.Reader, prg *program) (objs []mapstr.M, err error) { decoder := json.NewDecoder(body) for decoder.More() { var raw json.RawMessage @@ -352,45 +352,46 @@ func decodeJSON(body io.Reader, prg *program) (objs []mapstr.M, rawMessages []js if err == io.EOF { //nolint:errorlint // This will never be a wrapped error. break } - return nil, nil, fmt.Errorf("malformed JSON object at stream position %d: %w", decoder.InputOffset(), err) + return nil, fmt.Errorf("malformed JSON object at stream position %d: %w", decoder.InputOffset(), err) } var obj interface{} if err = newJSONDecoder(bytes.NewReader(raw)).Decode(&obj); err != nil { - return nil, nil, fmt.Errorf("malformed JSON object at stream position %d: %w", decoder.InputOffset(), err) + return nil, fmt.Errorf("malformed JSON object at stream position %d: %w", decoder.InputOffset(), err) } if prg != nil { obj, err = prg.eval(obj) if err != nil { - return nil, nil, err + return nil, err } - // Re-marshal to ensure the raw bytes agree with the constructed object. - raw, err = json.Marshal(obj) - if err != nil { - return nil, nil, fmt.Errorf("failed to remarshal object: %w", err) + if _, ok := obj.([]interface{}); ok { + // Re-marshal to ensure the raw bytes agree with the constructed object. + // This is only necessary when the program constructs an array return. + raw, err = json.Marshal(obj) + if err != nil { + return nil, fmt.Errorf("failed to remarshal object: %w", err) + } } } switch v := obj.(type) { case map[string]interface{}: objs = append(objs, v) - rawMessages = append(rawMessages, raw) case []interface{}: - nobjs, nrawMessages, err := decodeJSONArray(bytes.NewReader(raw)) + nobjs, err := decodeJSONArray(bytes.NewReader(raw)) if err != nil { - return nil, nil, fmt.Errorf("recursive error %d: %w", decoder.InputOffset(), err) + return nil, fmt.Errorf("recursive error %d: %w", decoder.InputOffset(), err) } objs = append(objs, nobjs...) - rawMessages = append(rawMessages, nrawMessages...) default: - return nil, nil, fmt.Errorf("%w: %T", errUnsupportedType, v) + return nil, fmt.Errorf("%w: %T", errUnsupportedType, v) } } for i := range objs { jsontransform.TransformNumbers(objs[i]) } - return objs, rawMessages, nil + return objs, nil } type program struct { @@ -506,17 +507,17 @@ func (p *program) eval(obj interface{}) (interface{}, error) { } } -func decodeJSONArray(raw *bytes.Reader) (objs []mapstr.M, rawMessages []json.RawMessage, err error) { +func decodeJSONArray(raw *bytes.Reader) (objs []mapstr.M, err error) { dec := newJSONDecoder(raw) token, err := dec.Token() if err != nil { if err == io.EOF { //nolint:errorlint // This will never be a wrapped error. 
- return nil, nil, nil + return nil, nil } - return nil, nil, fmt.Errorf("failed reading JSON array: %w", err) + return nil, fmt.Errorf("failed reading JSON array: %w", err) } if token != json.Delim('[') { - return nil, nil, fmt.Errorf("malformed JSON array, not starting with delimiter [ at position: %d", dec.InputOffset()) + return nil, fmt.Errorf("malformed JSON array, not starting with delimiter [ at position: %d", dec.InputOffset()) } for dec.More() { @@ -525,21 +526,20 @@ func decodeJSONArray(raw *bytes.Reader) (objs []mapstr.M, rawMessages []json.Raw if err == io.EOF { //nolint:errorlint // This will never be a wrapped error. break } - return nil, nil, fmt.Errorf("malformed JSON object at stream position %d: %w", dec.InputOffset(), err) + return nil, fmt.Errorf("malformed JSON object at stream position %d: %w", dec.InputOffset(), err) } var obj interface{} if err := newJSONDecoder(bytes.NewReader(raw)).Decode(&obj); err != nil { - return nil, nil, fmt.Errorf("malformed JSON object at stream position %d: %w", dec.InputOffset(), err) + return nil, fmt.Errorf("malformed JSON object at stream position %d: %w", dec.InputOffset(), err) } m, ok := obj.(map[string]interface{}) if ok { - rawMessages = append(rawMessages, raw) objs = append(objs, m) } } - return objs, rawMessages, nil + return objs, nil } func getIncludedHeaders(r *http.Request, headerConf []string) (includedHeaders mapstr.M) { diff --git a/x-pack/filebeat/input/http_endpoint/handler_test.go b/x-pack/filebeat/input/http_endpoint/handler_test.go index 131596f1fc3..2ac763f052b 100644 --- a/x-pack/filebeat/input/http_endpoint/handler_test.go +++ b/x-pack/filebeat/input/http_endpoint/handler_test.go @@ -8,7 +8,6 @@ import ( "bytes" "compress/gzip" "context" - "encoding/json" "errors" "flag" "io" @@ -38,13 +37,12 @@ func Test_httpReadJSON(t *testing.T) { log := logp.NewLogger("http_endpoint_test") tests := []struct { - name string - body string - program string - wantObjs []mapstr.M - wantStatus int - wantErr bool - wantRawMessage []json.RawMessage + name string + body string + program string + wantObjs []mapstr.M + wantStatus int + wantErr bool }{ { name: "single object", @@ -82,10 +80,6 @@ func Test_httpReadJSON(t *testing.T) { name: "sequence of objects accepted (LF)", body: `{"a":"1"} {"a":"2"}`, - wantRawMessage: []json.RawMessage{ - []byte(`{"a":"1"}`), - []byte(`{"a":"2"}`), - }, wantObjs: []mapstr.M{{"a": "1"}, {"a": "2"}}, wantStatus: http.StatusOK, }, @@ -110,26 +104,14 @@ func Test_httpReadJSON(t *testing.T) { wantErr: true, }, { - name: "array of objects in stream", - body: `{"a":"1"} [{"a":"2"},{"a":"3"}] {"a":"4"}`, - wantRawMessage: []json.RawMessage{ - []byte(`{"a":"1"}`), - []byte(`{"a":"2"}`), - []byte(`{"a":"3"}`), - []byte(`{"a":"4"}`), - }, + name: "array of objects in stream", + body: `{"a":"1"} [{"a":"2"},{"a":"3"}] {"a":"4"}`, wantObjs: []mapstr.M{{"a": "1"}, {"a": "2"}, {"a": "3"}, {"a": "4"}}, wantStatus: http.StatusOK, }, { name: "numbers", body: `{"a":1} [{"a":false},{"a":3.14}] {"a":-4}`, - wantRawMessage: []json.RawMessage{ - []byte(`{"a":1}`), - []byte(`{"a":false}`), - []byte(`{"a":3.14}`), - []byte(`{"a":-4}`), - }, wantObjs: []mapstr.M{ {"a": int64(1)}, {"a": false}, @@ -171,13 +153,6 @@ func Test_httpReadJSON(t *testing.T) { "timestamp": string(obj.timestamp), // leave timestamp in unix milli for ingest to handle. 
"event": r, })`, - wantRawMessage: []json.RawMessage{ - []byte(`{"event":{"data":"aGVsbG8=","number":1},"requestId":"ed4acda5-034f-9f42-bba1-f29aea6d7d8f","timestamp":"1578090901599"}`), - []byte(`{"event":{"data":"c21hbGwgd29ybGQ=","number":9007199254740991},"requestId":"ed4acda5-034f-9f42-bba1-f29aea6d7d8f","timestamp":"1578090901599"}`), - []byte(`{"event":{"data":"aGVsbG8gd29ybGQ=","number":"9007199254740992"},"requestId":"ed4acda5-034f-9f42-bba1-f29aea6d7d8f","timestamp":"1578090901599"}`), - []byte(`{"event":{"data":"YmlnIHdvcmxk","number":"9223372036854775808"},"requestId":"ed4acda5-034f-9f42-bba1-f29aea6d7d8f","timestamp":"1578090901599"}`), - []byte(`{"event":{"data":"d2lsbCBpdCBiZSBmcmllbmRzIHdpdGggbWU=","number":3.14},"requestId":"ed4acda5-034f-9f42-bba1-f29aea6d7d8f","timestamp":"1578090901599"}`), - }, wantObjs: []mapstr.M{ {"event": map[string]any{"data": "aGVsbG8=", "number": int64(1)}, "requestId": "ed4acda5-034f-9f42-bba1-f29aea6d7d8f", "timestamp": "1578090901599"}, {"event": map[string]any{"data": "c21hbGwgd29ybGQ=", "number": int64(9007199254740991)}, "requestId": "ed4acda5-034f-9f42-bba1-f29aea6d7d8f", "timestamp": "1578090901599"}, @@ -194,7 +169,7 @@ func Test_httpReadJSON(t *testing.T) { if err != nil { t.Fatalf("failed to compile program: %v", err) } - gotObjs, rawMessages, gotStatus, err := httpReadJSON(strings.NewReader(tt.body), prg) + gotObjs, gotStatus, err := httpReadJSON(strings.NewReader(tt.body), prg) if (err != nil) != tt.wantErr { t.Errorf("httpReadJSON() error = %v, wantErr %v", err, tt.wantErr) return @@ -205,10 +180,6 @@ func Test_httpReadJSON(t *testing.T) { if gotStatus != tt.wantStatus { t.Errorf("httpReadJSON() gotStatus = %v, want %v", gotStatus, tt.wantStatus) } - if tt.wantRawMessage != nil { - assert.Equal(t, tt.wantRawMessage, rawMessages) - } - assert.Equal(t, len(gotObjs), len(rawMessages)) }) } } diff --git a/x-pack/filebeat/input/httpjson/input_test.go b/x-pack/filebeat/input/httpjson/input_test.go index 4f09d8f057f..1416efa3c78 100644 --- a/x-pack/filebeat/input/httpjson/input_test.go +++ b/x-pack/filebeat/input/httpjson/input_test.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "io" - "math/rand" "net/http" "net/http/httptest" "os" @@ -1724,7 +1723,8 @@ func retryHandler() http.HandlerFunc { _, _ = w.Write([]byte(`{"hello":"world"}`)) return } - w.WriteHeader(rand.Intn(100) + 500) + // Any 5xx except 501 will result in a retry. + w.WriteHeader(500) count += 1 } } diff --git a/x-pack/filebeat/main_test.go b/x-pack/filebeat/main_test.go index 3d5424c3088..71b32b9e6e8 100644 --- a/x-pack/filebeat/main_test.go +++ b/x-pack/filebeat/main_test.go @@ -9,6 +9,7 @@ import ( "os" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" cmd "github.com/elastic/beats/v7/libbeat/cmd" "github.com/elastic/beats/v7/libbeat/tests/system/template" fbcmd "github.com/elastic/beats/v7/x-pack/filebeat/cmd" @@ -24,11 +25,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") fbCommand = fbcmd.Filebeat() fbCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") fbCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { if err := fbCommand.Execute(); err != nil { os.Exit(1) diff --git a/x-pack/functionbeat/main_test.go b/x-pack/functionbeat/main_test.go index ecb5ac12435..1c21f88e289 100644 --- a/x-pack/functionbeat/main_test.go +++ b/x-pack/functionbeat/main_test.go @@ -10,6 +10,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" "github.com/elastic/beats/v7/x-pack/functionbeat/manager/cmd" ) @@ -21,11 +22,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/functionbeat/provider/aws/cmd/root.go b/x-pack/functionbeat/provider/aws/cmd/root.go index fc7a7e0c251..c8e7ba17d62 100644 --- a/x-pack/functionbeat/provider/aws/cmd/root.go +++ b/x-pack/functionbeat/provider/aws/cmd/root.go @@ -7,6 +7,7 @@ package cmd import ( "flag" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/x-pack/functionbeat/function/beater" funcmd "github.com/elastic/beats/v7/x-pack/functionbeat/function/cmd" ) @@ -20,6 +21,9 @@ var RootCmd *funcmd.FunctionCmd func init() { RootCmd = funcmd.NewFunctionCmd(Name, beater.New) RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("d")) + cfgfile.AddAllowedBackwardsCompatibleFlag("d") RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("v")) + cfgfile.AddAllowedBackwardsCompatibleFlag("v") RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("e")) + cfgfile.AddAllowedBackwardsCompatibleFlag("e") } diff --git a/x-pack/functionbeat/provider/aws/main_test.go b/x-pack/functionbeat/provider/aws/main_test.go index dad745420cb..f180fd28275 100644 --- a/x-pack/functionbeat/provider/aws/main_test.go +++ b/x-pack/functionbeat/provider/aws/main_test.go @@ -10,6 +10,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/x-pack/functionbeat/provider/aws/cmd" ) @@ -20,12 +21,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
func TestSystem(t *testing.T) { - + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/functionbeat/provider/local/main_test.go b/x-pack/functionbeat/provider/local/main_test.go index cc941b40c02..7c617b300a6 100644 --- a/x-pack/functionbeat/provider/local/main_test.go +++ b/x-pack/functionbeat/provider/local/main_test.go @@ -10,6 +10,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/x-pack/functionbeat/provider/local/cmd" ) @@ -20,11 +21,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/heartbeat/main_test.go b/x-pack/heartbeat/main_test.go index 91fe8b60ad8..44d5882a990 100644 --- a/x-pack/heartbeat/main_test.go +++ b/x-pack/heartbeat/main_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/elastic/beats/v7/heartbeat/cmd" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" ) @@ -19,11 +20,14 @@ func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. func TestSystem(_ *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/libbeat/common/aws/acker.go b/x-pack/libbeat/common/aws/acker.go deleted file mode 100644 index 95fbe14b774..00000000000 --- a/x-pack/libbeat/common/aws/acker.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package aws - -import ( - "context" - "sync" - - "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/common/acker" -) - -// EventACKTracker tracks the publishing state of S3 objects. Specifically -// it tracks the number of message acknowledgements that are pending from the -// output. It can be used to wait until all ACKs have been received for one or -// more S3 objects. -type EventACKTracker struct { - sync.Mutex - PendingACKs int64 - ctx context.Context - cancel context.CancelFunc -} - -func NewEventACKTracker(ctx context.Context) *EventACKTracker { - ctx, cancel := context.WithCancel(ctx) - return &EventACKTracker{ctx: ctx, cancel: cancel} -} - -// Add increments the number of pending ACKs. -func (a *EventACKTracker) Add() { - a.Lock() - a.PendingACKs++ - a.Unlock() -} - -// ACK decrements the number of pending ACKs. 
-func (a *EventACKTracker) ACK() { - a.Lock() - defer a.Unlock() - - if a.PendingACKs <= 0 { - panic("misuse detected: negative ACK counter") - } - - a.PendingACKs-- - if a.PendingACKs == 0 { - a.cancel() - } -} - -// Wait waits for the number of pending ACKs to be zero. -// Wait must be called sequentially only after every expected -// `Add` calls are made. Failing to do so could reset the pendingACKs -// property to 0 and would results in Wait returning after additional -// calls to `Add` are made without a corresponding `ACK` call. -func (a *EventACKTracker) Wait() { - // If there were never any pending ACKs then cancel the context. (This can - // happen when a document contains no events or cannot be read due to an error). - a.Lock() - if a.PendingACKs == 0 { - a.cancel() - } - a.Unlock() - - // Wait. - <-a.ctx.Done() -} - -// NewEventACKHandler returns a beat ACKer that can receive callbacks when -// an event has been ACKed an output. If the event contains a private metadata -// pointing to an eventACKTracker then it will invoke the trackers ACK() method -// to decrement the number of pending ACKs. -func NewEventACKHandler() beat.EventListener { - return acker.ConnectionOnly( - acker.EventPrivateReporter(func(_ int, privates []interface{}) { - for _, private := range privates { - if ack, ok := private.(*EventACKTracker); ok { - ack.ACK() - } - } - }), - ) -} diff --git a/x-pack/libbeat/common/aws/acker_test.go b/x-pack/libbeat/common/aws/acker_test.go deleted file mode 100644 index 3c470f0b922..00000000000 --- a/x-pack/libbeat/common/aws/acker_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package aws - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/beats/v7/libbeat/beat" -) - -func TestEventACKTracker(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - acker := NewEventACKTracker(ctx) - acker.Add() - acker.ACK() - - assert.EqualValues(t, 0, acker.PendingACKs) - assert.ErrorIs(t, acker.ctx.Err(), context.Canceled) -} - -func TestEventACKTrackerNoACKs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - acker := NewEventACKTracker(ctx) - acker.Wait() - - assert.EqualValues(t, 0, acker.PendingACKs) - assert.ErrorIs(t, acker.ctx.Err(), context.Canceled) -} - -func TestEventACKHandler(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Create acker. Add one pending ACK. - acker := NewEventACKTracker(ctx) - acker.Add() - - // Create an ACK handler and simulate one ACKed event. - ackHandler := NewEventACKHandler() - ackHandler.AddEvent(beat.Event{Private: acker}, true) - ackHandler.ACKEvents(1) - - assert.EqualValues(t, 0, acker.PendingACKs) - assert.ErrorIs(t, acker.ctx.Err(), context.Canceled) -} - -func TestEventACKHandlerWait(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - - // Create acker. Add one pending ACK. 
- acker := NewEventACKTracker(ctx) - acker.Add() - acker.ACK() - acker.Wait() - acker.Add() - - assert.EqualValues(t, 1, acker.PendingACKs) - assert.ErrorIs(t, acker.ctx.Err(), context.Canceled) -} diff --git a/x-pack/libbeat/libbeat_test.go b/x-pack/libbeat/libbeat_test.go index 338ebd7e5fb..a3df546f3f3 100644 --- a/x-pack/libbeat/libbeat_test.go +++ b/x-pack/libbeat/libbeat_test.go @@ -8,6 +8,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" ) @@ -18,11 +19,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/metricbeat/main_test.go b/x-pack/metricbeat/main_test.go index b092682ccf3..e96a9932765 100644 --- a/x-pack/metricbeat/main_test.go +++ b/x-pack/metricbeat/main_test.go @@ -8,6 +8,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/libbeat/tests/system/template" "github.com/elastic/beats/v7/x-pack/metricbeat/cmd" ) @@ -18,11 +19,14 @@ func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. func TestSystem(t *testing.T) { + cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { main() } diff --git a/x-pack/metricbeat/module/aws/_meta/docs.asciidoc b/x-pack/metricbeat/module/aws/_meta/docs.asciidoc index 0b224bf5630..acd5de719da 100644 --- a/x-pack/metricbeat/module/aws/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/aws/_meta/docs.asciidoc @@ -134,6 +134,25 @@ Enforces the use of FIPS service endpoints. See < 500 { + base.Logger().Debug("apigateway_max_results config value can not exceed value 500. Setting apigateway_max_results=500") + *config.LimitRestAPI = 500 + } else if *config.LimitRestAPI <= 0 { + base.Logger().Debug("apigateway_max_results config value can not be <=0. 
Setting apigateway_max_results=25") + *config.LimitRestAPI = 25 + } + } + // Construct MetricSet with a full regions list if config.Regions == nil { svcEC2 := ec2.NewFromConfig(awsConfig, func(o *ec2.Options) { diff --git a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go index ed043e8c38f..355c6710093 100644 --- a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go +++ b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go @@ -6,12 +6,15 @@ package cloudwatch import ( "fmt" + "maps" "reflect" "strconv" "strings" "time" awssdk "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/apigateway" + "github.com/aws/aws-sdk-go-v2/service/apigatewayv2" "github.com/aws/aws-sdk-go-v2/service/cloudwatch" "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi" @@ -23,13 +26,25 @@ import ( "github.com/elastic/elastic-agent-libs/logp" ) +const checkns = "AWS/ApiGateway" +const checkresource_type = "apigateway:restapis" + var ( - metricsetName = "cloudwatch" - defaultStatistics = []string{"Average", "Maximum", "Minimum", "Sum", "SampleCount"} - dimensionSeparator = "," - dimensionValueWildcard = "*" + metricsetName = "cloudwatch" + defaultStatistics = []string{"Average", "Maximum", "Minimum", "Sum", "SampleCount"} + dimensionSeparator = "," + dimensionValueWildcard = "*" + checkns_lower = strings.ToLower(checkns) + checkresource_type_lower = strings.ToLower(checkresource_type) ) +type APIClients struct { + CloudWatchClient *cloudwatch.Client + Resourcegroupstaggingapi *resourcegroupstaggingapi.Client + Apigateway *apigateway.Client + Apigatewayv2 *apigatewayv2.Client +} + // init registers the MetricSet with the central registry as soon as the program // starts. The New function will be called later to instantiate an instance of // the MetricSet for each host defined in the module's configuration. After the @@ -123,7 +138,8 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { startTime, endTime := aws.GetStartTimeEndTime(time.Now(), m.Period, m.Latency, m.PreviousEndTime) m.PreviousEndTime = endTime m.Logger().Debugf("startTime = %s, endTime = %s", startTime, endTime) - + // Initialise the map that will be used in case APIGateway api is configured. 
Infoapi includes Name_of_API:ID_of_API entries + infoapi := make(map[string]string) // Check statistic method in config err := m.checkStatistics() if err != nil { @@ -147,13 +163,12 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { m.logger.Debugf("Collecting metrics from AWS region %s", regionName) beatsConfig := m.MetricSet.AwsConfig.Copy() beatsConfig.Region = regionName - - svcCloudwatch, svcResourceAPI, err := m.createAwsRequiredClients(beatsConfig, regionName, config) + APIClients, err := m.createAwsRequiredClients(beatsConfig, regionName, config) if err != nil { m.Logger().Warn("skipping metrics list from region '%s'", regionName) } - eventsWithIdentifier, err := m.createEvents(svcCloudwatch, svcResourceAPI, listMetricDetailTotal.metricsWithStats, listMetricDetailTotal.resourceTypeFilters, regionName, startTime, endTime) + eventsWithIdentifier, err := m.createEvents(APIClients.CloudWatchClient, APIClients.Resourcegroupstaggingapi, listMetricDetailTotal.metricsWithStats, listMetricDetailTotal.resourceTypeFilters, infoapi, regionName, startTime, endTime) if err != nil { return fmt.Errorf("createEvents failed for region %s: %w", regionName, err) } @@ -173,7 +188,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { beatsConfig := m.MetricSet.AwsConfig.Copy() beatsConfig.Region = regionName - svcCloudwatch, svcResourceAPI, err := m.createAwsRequiredClients(beatsConfig, regionName, config) + APIClients, err := m.createAwsRequiredClients(beatsConfig, regionName, config) if err != nil { m.Logger().Warn("skipping metrics list from region '%s'", regionName, err) continue @@ -183,13 +198,13 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { // otherwise only retrieve metrics from the specific namespaces from the config var listMetricsOutput []aws.MetricWithID if len(namespaceDetailTotal) == 0 { - listMetricsOutput, err = aws.GetListMetricsOutput("*", regionName, m.Period, m.IncludeLinkedAccounts, m.OwningAccount, m.MonitoringAccountID, svcCloudwatch) + listMetricsOutput, err = aws.GetListMetricsOutput("*", regionName, m.Period, m.IncludeLinkedAccounts, m.OwningAccount, m.MonitoringAccountID, APIClients.CloudWatchClient) if err != nil { m.Logger().Errorf("Error while retrieving the list of metrics for region %s and namespace %s: %w", regionName, "*", err) } } else { for namespace := range namespaceDetailTotal { - listMetricsOutputPerNamespace, err := aws.GetListMetricsOutput(namespace, regionName, m.Period, m.IncludeLinkedAccounts, m.OwningAccount, m.MonitoringAccountID, svcCloudwatch) + listMetricsOutputPerNamespace, err := aws.GetListMetricsOutput(namespace, regionName, m.Period, m.IncludeLinkedAccounts, m.OwningAccount, m.MonitoringAccountID, APIClients.CloudWatchClient) if err != nil { m.Logger().Errorf("Error while retrieving the list of metrics for region %s and namespace %s: %w", regionName, namespace, err) } @@ -203,14 +218,50 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { for namespace, namespaceDetails := range namespaceDetailTotal { m.logger.Debugf("Collected metrics from namespace %s", namespace) - // filter listMetricsOutput by detailed configuration per each namespace filteredMetricWithStatsTotal := filterListMetricsOutput(listMetricsOutput, namespace, namespaceDetails) // get resource type filters and tags filters for each namespace resourceTypeTagFilters := constructTagsFilters(namespaceDetails) - eventsWithIdentifier, err := m.createEvents(svcCloudwatch, svcResourceAPI, filteredMetricWithStatsTotal, resourceTypeTagFilters, regionName, 
startTime, endTime) + //Check whether namespace is APIGW + if strings.Contains(strings.ToLower(namespace), checkns_lower) { + useonlyrest := false + if len(resourceTypeTagFilters) == 1 { + for key := range resourceTypeTagFilters { + if strings.Compare(strings.ToLower(key), checkresource_type_lower) == 0 { + useonlyrest = true + } + } + } + // inforestapi includes only Rest APIs + if useonlyrest { + infoapi, err = aws.GetAPIGatewayRestAPIOutput(APIClients.Apigateway, config.LimitRestAPI) + if err != nil { + m.Logger().Errorf("could not get rest apis output: %v", err) + } + } else { + // infoapi includes only Rest APIs + // apiGatewayAPI includes only WebSocket and HTTP APIs + infoapi, err = aws.GetAPIGatewayRestAPIOutput(APIClients.Apigateway, config.LimitRestAPI) + if err != nil { + m.Logger().Errorf("could not get rest apis output: %v", err) + } + + apiGatewayAPI, err := aws.GetAPIGatewayAPIOutput(APIClients.Apigatewayv2) + if err != nil { + m.Logger().Errorf("could not get http and websocket apis output: %v", err) + } + if len(apiGatewayAPI) > 0 { + maps.Copy(infoapi, apiGatewayAPI) + } + + } + + m.Logger().Debugf("infoapi response: %v", infoapi) + + } + eventsWithIdentifier, err := m.createEvents(APIClients.CloudWatchClient, APIClients.Resourcegroupstaggingapi, filteredMetricWithStatsTotal, resourceTypeTagFilters, infoapi, regionName, startTime, endTime) if err != nil { return fmt.Errorf("createEvents failed for region %s: %w", regionName, err) } @@ -233,23 +284,32 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { } // createAwsRequiredClients will return the two necessary client instances to do Metric requests to the AWS API -func (m *MetricSet) createAwsRequiredClients(beatsConfig awssdk.Config, regionName string, config aws.Config) (*cloudwatch.Client, *resourcegroupstaggingapi.Client, error) { +func (m *MetricSet) createAwsRequiredClients(beatsConfig awssdk.Config, regionName string, config aws.Config) (APIClients, error) { m.logger.Debugf("Collecting metrics from AWS region %s", regionName) - svcCloudwatchClient := cloudwatch.NewFromConfig(beatsConfig, func(o *cloudwatch.Options) { + APIClients := APIClients{} + APIClients.CloudWatchClient = cloudwatch.NewFromConfig(beatsConfig, func(o *cloudwatch.Options) { if config.AWSConfig.FIPSEnabled { o.EndpointOptions.UseFIPSEndpoint = awssdk.FIPSEndpointStateEnabled } }) - svcResourceAPIClient := resourcegroupstaggingapi.NewFromConfig(beatsConfig, func(o *resourcegroupstaggingapi.Options) { + APIClients.Resourcegroupstaggingapi = resourcegroupstaggingapi.NewFromConfig(beatsConfig, func(o *resourcegroupstaggingapi.Options) { if config.AWSConfig.FIPSEnabled { o.EndpointOptions.UseFIPSEndpoint = awssdk.FIPSEndpointStateEnabled } }) - return svcCloudwatchClient, svcResourceAPIClient, nil + APIClients.Apigateway = apigateway.NewFromConfig(beatsConfig, func(o *apigateway.Options) { + + }) + + APIClients.Apigatewayv2 = apigatewayv2.NewFromConfig(beatsConfig, func(o *apigatewayv2.Options) { + + }) + + return APIClients, nil } // filterListMetricsOutput compares config details with listMetricsOutput and filter out the ones don't match @@ -470,7 +530,7 @@ func insertRootFields(event mb.Event, metricValue float64, labels []string) mb.E return event } -func (m *MetricSet) createEvents(svcCloudwatch cloudwatch.GetMetricDataAPIClient, svcResourceAPI resourcegroupstaggingapi.GetResourcesAPIClient, listMetricWithStatsTotal []metricsWithStatistics, resourceTypeTagFilters map[string][]aws.Tag, regionName string, startTime time.Time, endTime 
@@ -233,23 +284,28 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error {
 }

-// createAwsRequiredClients will return the two necessary client instances to do Metric requests to the AWS API
+// createAwsRequiredClients returns an APIClients bundle with the client instances needed to make metric requests to the AWS APIs
-func (m *MetricSet) createAwsRequiredClients(beatsConfig awssdk.Config, regionName string, config aws.Config) (*cloudwatch.Client, *resourcegroupstaggingapi.Client, error) {
+func (m *MetricSet) createAwsRequiredClients(beatsConfig awssdk.Config, regionName string, config aws.Config) (APIClients, error) {
 	m.logger.Debugf("Collecting metrics from AWS region %s", regionName)

-	svcCloudwatchClient := cloudwatch.NewFromConfig(beatsConfig, func(o *cloudwatch.Options) {
+	apiClients := APIClients{}
+	apiClients.CloudWatchClient = cloudwatch.NewFromConfig(beatsConfig, func(o *cloudwatch.Options) {
 		if config.AWSConfig.FIPSEnabled {
 			o.EndpointOptions.UseFIPSEndpoint = awssdk.FIPSEndpointStateEnabled
 		}
 	})

-	svcResourceAPIClient := resourcegroupstaggingapi.NewFromConfig(beatsConfig, func(o *resourcegroupstaggingapi.Options) {
+	apiClients.Resourcegroupstaggingapi = resourcegroupstaggingapi.NewFromConfig(beatsConfig, func(o *resourcegroupstaggingapi.Options) {
 		if config.AWSConfig.FIPSEnabled {
 			o.EndpointOptions.UseFIPSEndpoint = awssdk.FIPSEndpointStateEnabled
 		}
 	})

-	return svcCloudwatchClient, svcResourceAPIClient, nil
+	// The API Gateway clients need no extra options here
+	apiClients.Apigateway = apigateway.NewFromConfig(beatsConfig)
+	apiClients.Apigatewayv2 = apigatewayv2.NewFromConfig(beatsConfig)
+
+	return apiClients, nil
 }

 // filterListMetricsOutput compares config details with listMetricsOutput and filter out the ones don't match
@@ -470,7 +530,7 @@ func insertRootFields(event mb.Event, metricValue float64, labels []string) mb.E
 	return event
 }

-func (m *MetricSet) createEvents(svcCloudwatch cloudwatch.GetMetricDataAPIClient, svcResourceAPI resourcegroupstaggingapi.GetResourcesAPIClient, listMetricWithStatsTotal []metricsWithStatistics, resourceTypeTagFilters map[string][]aws.Tag, regionName string, startTime time.Time, endTime time.Time) (map[string]mb.Event, error) {
+func (m *MetricSet) createEvents(svcCloudwatch cloudwatch.GetMetricDataAPIClient, svcResourceAPI resourcegroupstaggingapi.GetResourcesAPIClient, listMetricWithStatsTotal []metricsWithStatistics, resourceTypeTagFilters map[string][]aws.Tag, infoAPImap map[string]string, regionName string, startTime time.Time, endTime time.Time) (map[string]mb.Event, error) {
 	// Initialize events for each identifier.
 	events := make(map[string]mb.Event)
@@ -580,6 +640,13 @@ func (m *MetricSet) createEvents(svcCloudwatch cloudwatch.GetMetricDataAPIClient
 		// And tags are only store under s3BucketName in resourceTagMap.
 		subIdentifiers := strings.Split(identifierValue, dimensionSeparator)
 		for _, subIdentifier := range subIdentifiers {
+
+			if len(infoAPImap) > 0 { // skip the lookup when no API info was collected
+				if apiID, ok := infoAPImap[subIdentifier]; ok {
+					subIdentifier = apiID // replace a known API name with its API ID
+				}
+			}
+
 			if _, ok := events[uniqueIdentifierValue]; !ok {
 				// when tagsFilter is not empty but no entry in
 				// resourceTagMap for this identifier, do not initialize
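The lookup added to createEvents rewrites a dimension value through infoAPImap before events are keyed. The helpers added to utils.go later in this diff build that map as API name to API ID, so the substitution swaps a known name for its ID. A tiny runnable illustration, with invented sample values:

package main

import "fmt"

func main() {
	// Built by the API Gateway helpers in utils.go: API name -> API ID.
	// The name and ID here are invented sample values.
	infoAPImap := map[string]string{
		"my-rest-api": "a1b2c3d4e5",
	}

	// subIdentifier arrives as a CloudWatch dimension value; when it matches
	// a known API name it is swapped for the API ID, so events line up with
	// the IDs used elsewhere, for example in resource tags.
	subIdentifier := "my-rest-api"
	if apiID, ok := infoAPImap[subIdentifier]; ok {
		subIdentifier = apiID
	}
	fmt.Println(subIdentifier) // a1b2c3d4e5
}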
diff --git a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch_test.go b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch_test.go
index 08f878f9bb3..45b250c4f76 100644
--- a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch_test.go
+++ b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch_test.go
@@ -13,6 +13,7 @@ import (
 	"testing"
 	"time"

+	"github.com/aws/aws-sdk-go-v2/service/apigateway"
 	cloudwatchtypes "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
 	resourcegroupstaggingapitypes "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi/types"
 	"github.com/aws/smithy-go/middleware"
@@ -1255,6 +1256,14 @@ func (m *MockResourceGroupsTaggingClient) GetResources(context.Context, *resourc
 	}, nil
 }

+// MockResourceGroupsTaggingClient2 is used for unit tests.
+type MockResourceGroupsTaggingClient2 struct{}
+
+// GetResources is a stub of the API Gateway GetResources operation used by the tests.
+func (m *MockResourceGroupsTaggingClient2) GetResources(context.Context, *apigateway.GetResourcesInput, ...func(*apigateway.Options)) (*apigateway.GetResourcesOutput, error) {
+	return &apigateway.GetResourcesOutput{}, nil
+}
+
 func TestCreateEventsWithIdentifier(t *testing.T) {
 	m := MetricSet{}
 	m.CloudwatchConfigs = []Config{{Statistic: []string{"Average"}}}
@@ -1262,6 +1271,8 @@
 	m.logger = logp.NewLogger("test")

 	mockTaggingSvc := &MockResourceGroupsTaggingClient{}
+	infoAPImap := make(map[string]string)
+
 	mockCloudwatchSvc := &MockCloudWatchClient{}
 	listMetricWithStatsTotal := []metricsWithStatistics{{
 		listMetric1,
@@ -1272,7 +1283,7 @@
 	var previousEndTime time.Time
 	startTime, endTime := aws.GetStartTimeEndTime(time.Now(), m.MetricSet.Period, m.MetricSet.Latency, previousEndTime)

-	events, err := m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, regionName, startTime, endTime)
+	events, err := m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, infoAPImap, regionName, startTime, endTime)
 	assert.NoError(t, err)

 	assert.Equal(t, 1, len(events))
@@ -1293,6 +1304,7 @@ func TestCreateEventsWithoutIdentifier(t *testing.T) {

 	mockTaggingSvc := &MockResourceGroupsTaggingClient{}
 	mockCloudwatchSvc := &MockCloudWatchClientWithoutDim{}
+	infoAPImap := make(map[string]string)
 	listMetricWithStatsTotal := []metricsWithStatistics{
 		{
 			cloudwatchMetric: aws.MetricWithID{
@@ -1318,7 +1330,7 @@ func TestCreateEventsWithoutIdentifier(t *testing.T) {
 	var previousEndTime time.Time
 	startTime, endTime := aws.GetStartTimeEndTime(time.Now(), m.MetricSet.Period, m.MetricSet.Latency, previousEndTime)

-	events, err := m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, regionName, startTime, endTime)
+	events, err := m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, infoAPImap, regionName, startTime, endTime)
 	assert.NoError(t, err)

 	expectedID := " " + " " + regionName + accountID + namespace
@@ -1339,6 +1351,7 @@ func TestCreateEventsWithDataGranularity(t *testing.T) {

 	mockTaggingSvc := &MockResourceGroupsTaggingClient{}
 	mockCloudwatchSvc := &MockCloudWatchClientWithDataGranularity{}
+	infoAPImap := make(map[string]string)
 	listMetricWithStatsTotal := []metricsWithStatistics{
 		{
 			listMetric1,
@@ -1354,7 +1367,7 @@ func TestCreateEventsWithDataGranularity(t *testing.T) {
 	var previousEndTime time.Time
 	startTime, endTime := aws.GetStartTimeEndTime(time.Now(), m.MetricSet.Period, m.MetricSet.Latency, previousEndTime)

-	events, err := m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, regionName, startTime, endTime)
+	events, err := m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, infoAPImap, regionName, startTime, endTime)
 	assert.NoError(t, err)

 	expectedID := " " + regionName + accountID
@@ -1380,6 +1393,7 @@ func TestCreateEventsWithTagsFilter(t *testing.T) {
 	m.logger = logp.NewLogger("test")

 	mockTaggingSvc := &MockResourceGroupsTaggingClient{}
+	infoAPImap := make(map[string]string)
 	mockCloudwatchSvc := &MockCloudWatchClient{}
 	listMetricWithStatsTotal := []metricsWithStatistics{
 		{
@@ -1398,7 +1412,7 @@ func TestCreateEventsWithTagsFilter(t *testing.T) {
 	var previousEndTime time.Time
 	startTime, endTime := aws.GetStartTimeEndTime(time.Now(), m.MetricSet.Period, m.MetricSet.Latency, previousEndTime)

-	events, err := m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, regionName, startTime, endTime)
+	events, err := m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, infoAPImap, regionName, startTime, endTime)
 	assert.NoError(t, err)

 	assert.Equal(t, 1, len(events))
@@ -1410,7 +1424,7 @@
 		},
 	}

-	events, err = m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, regionName, startTime, endTime)
+	events, err = m.createEvents(mockCloudwatchSvc, mockTaggingSvc, listMetricWithStatsTotal, resourceTypeTagFilters, infoAPImap, regionName, startTime, endTime)
 	assert.NoError(t, err)
 	assert.Equal(t, 0, len(events))
 }
@@ -1560,12 +1574,13 @@ func TestCreateEventsTimestamp(t *testing.T) {
 	}

 	resourceTypeTagFilters := map[string][]aws.Tag{}
+	infoAPImap := make(map[string]string)
 	var previousEndTime time.Time
 	startTime, endTime := aws.GetStartTimeEndTime(time.Now(), m.MetricSet.Period, m.MetricSet.Latency, previousEndTime)

 	cloudwatchMock := &MockCloudWatchClientWithoutDim{}
 	resGroupTaggingClientMock := &MockResourceGroupsTaggingClient{}
-	events, err := m.createEvents(cloudwatchMock, resGroupTaggingClientMock, listMetricWithStatsTotal, resourceTypeTagFilters, regionName, startTime, endTime)
+	events, err := m.createEvents(cloudwatchMock, resGroupTaggingClientMock, listMetricWithStatsTotal, resourceTypeTagFilters, infoAPImap, regionName, startTime, endTime)
 	assert.NoError(t, err)
 	assert.Equal(t, timestamp, events[" "+regionName+accountID+namespace+"-0"].Timestamp)
 }
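The next file adds the two API Gateway helpers the metricset relies on. As orientation, here is a hedged sketch of how they could be exercised together outside the metricset; the client construction via LoadDefaultConfig and the page-size value are illustrative assumptions, not code from this change:

package main

import (
	"context"
	"fmt"
	"maps"

	awsconfig "github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/apigateway"
	"github.com/aws/aws-sdk-go-v2/service/apigatewayv2"

	"github.com/elastic/beats/v7/x-pack/metricbeat/module/aws"
)

func main() {
	cfg, err := awsconfig.LoadDefaultConfig(context.Background())
	if err != nil {
		panic(err)
	}

	limit := int32(200) // example page size; a nil limit keeps the API default of 25

	// REST APIs: name -> ID
	infoapi, err := aws.GetAPIGatewayRestAPIOutput(apigateway.NewFromConfig(cfg), &limit)
	if err != nil {
		panic(err)
	}

	// HTTP and WebSocket APIs: name -> ID, merged into the same map
	v2APIs, err := aws.GetAPIGatewayAPIOutput(apigatewayv2.NewFromConfig(cfg))
	if err != nil {
		panic(err)
	}
	maps.Copy(infoapi, v2APIs)

	fmt.Println(infoapi)
}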
diff --git a/x-pack/metricbeat/module/aws/utils.go b/x-pack/metricbeat/module/aws/utils.go
index b5c1a924913..b233786c466 100644
--- a/x-pack/metricbeat/module/aws/utils.go
+++ b/x-pack/metricbeat/module/aws/utils.go
@@ -11,7 +11,11 @@ import (
 	"strings"
 	"time"

+	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/aws/arn"
+	"github.com/aws/aws-sdk-go-v2/service/apigateway"
+	"github.com/aws/aws-sdk-go-v2/service/apigatewayv2"
+	"github.com/aws/aws-sdk-go-v2/service/cloudwatch"
 	"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
 	"github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi"
@@ -107,6 +111,45 @@ func GetListMetricsOutput(namespace string, regionName string, period time.Durat
 	return metricWithAccountID, nil
 }

+// GetAPIGatewayRestAPIOutput returns the REST APIs configured in API Gateway, via the GetRestApis operation,
+// as a map of API name to API ID.
+// limit caps the number of results returned per page; the API default is 25 and the maximum is 500.
+func GetAPIGatewayRestAPIOutput(svcRestApi *apigateway.Client, limit *int32) (map[string]string, error) {
+	input := &apigateway.GetRestApisInput{Limit: limit} // a nil limit keeps the API's default page size
+	ctx, cancel := getContextWithTimeout(DefaultApiTimeout)
+	defer cancel()
+	result, err := svcRestApi.GetRestApis(ctx, input)
+	if err != nil {
+		return nil, fmt.Errorf("error retrieving GetRestApis: %w", err)
+	}

+	// Map each REST API name to its ID
+	infoRestAPImap := make(map[string]string, len(result.Items))
+	for _, api := range result.Items {
+		infoRestAPImap[aws.ToString(api.Name)] = aws.ToString(api.Id)
+	}
+	return infoRestAPImap, nil
+}
+
+// GetAPIGatewayAPIOutput returns the HTTP and WebSocket APIs configured in API Gateway, via the
+// apigatewayv2 GetApis operation, as a map of API name to API ID.
+func GetAPIGatewayAPIOutput(svcHttpApi *apigatewayv2.Client) (map[string]string, error) {
+	input := &apigatewayv2.GetApisInput{}
+	ctx, cancel := getContextWithTimeout(DefaultApiTimeout)
+	defer cancel()
+	result, err := svcHttpApi.GetApis(ctx, input)
+	if err != nil {
+		return nil, fmt.Errorf("error retrieving GetApis: %w", err)
+	}
+
+	// Map each API name to its ID
+	infoAPImap := make(map[string]string, len(result.Items))
+	for _, api := range result.Items {
+		infoAPImap[aws.ToString(api.Name)] = aws.ToString(api.ApiId)
+	}
+	return infoAPImap, nil
+}
+
 // GetMetricDataResults function uses MetricDataQueries to get metric data output.
 func GetMetricDataResults(metricDataQueries []types.MetricDataQuery, svc cloudwatch.GetMetricDataAPIClient, startTime time.Time, endTime time.Time) ([]types.MetricDataResult, error) {
 	maxNumberOfMetricsRetrieved := 500
diff --git a/x-pack/metricbeat/module/awsfargate/fields.go b/x-pack/metricbeat/module/awsfargate/fields.go
index e7a68db138f..fd22574c8a5 100644
--- a/x-pack/metricbeat/module/awsfargate/fields.go
+++ b/x-pack/metricbeat/module/awsfargate/fields.go
@@ -19,5 +19,5 @@ func init() {

 // AssetAwsfargate returns asset data.
 // This is the base64 encoded zlib format compressed contents of module/awsfargate.
func AssetAwsfargate() string { - return "eJzsWt9v2zYQfs9fcQgGdGsb56XYgx8KeG6KBVjSIEnXR+VMnW3OEqmSVF132/8+kKJsWT9sWU4yJ4gfLfLu++6++ySLPoEZLfqAcz1GNUFDRwCGm4j6cDz4cgMfs2+PjwBC0kzxxHAp+vD+CADgbrXvDmIZphEBk1FEzGiw2/1FiMkozjSMlYzBoJ7ZbzBEg0AiTCQXpncEoCgi1NSHERk8AhhzikLdd7lOQGBMJaj2YxYJ9WGiZJr4b4rbiluZFAa5INWLcESR7jEZBxjjDylwrgNiOmBRqg2p5dY8/IwWc6nCwvdr5Tgb3sAw2+pydUqdLzlZi7ADgmWEzhhsa05QiS7ZXVsH15ed84Y05oLbgCdjjHm06IxiFQlKkfbA9I2U5rJ7aQqgfKhsdQWbXR1og0ZXchVVXsn0vnAB4G4V5i6nq8FMV8NopmhgTopAM4UJhdl4FufWAXcx4Ke/z4Y3wfDT5e3g/PLsOrg4ux18GNwOgs/X58Gf7/49tWtPs7W1w51/ykOef8pTu9aubLaC0mRsakKlPMX5hJ+veJKQkr/UpnOl2yfXsumVIHkKHpIwfMzXvGanHO9LFwGGy/lfRQdkSmrt0GhAEea11G9hPuVsCvQ1xUiDkYWhcDV6A69OX8Gbwtc87DXXKyTNFYVOcqm+P1a3UwIfG7LYMJbKKdmVOBOtG11b9w0IZ0LOxYPgc5G7o4splmoRTFGFQcRjbmrRaYYRhcE4klhe0ALi76hCuHCJ4A+bowNOlqS1yMq2VIF0nQrDY4Lh1efcfnql9XXjX8w9IyUo6iWszL1VgVwGqWI0fUhIMRJ1K7aU0X6uss04IZBjcKS48OBAJ8ioTKxCQUgVPwEeYHFixH9QCKOFU4pI4xEpu8E2kklFlTZW2BrOZuVpW1GNpJh0I2ARuNjt668X2tDBlV4ajDybrAm20h7qNiqHKKUNfPaVlGf9kJLyQF2KZiCpJnVodffVttC2DYKDf4jaqXDYVzGO6aNYUJu6u9n432t+u5zQVONkK9yD0EkJcxdZ5KxCrmdcdnuO+cD1DM5PP3V9iFGE5afLzdlblmfAWBqnERoKXQ4NYaq4mLjKRHycTdeUVs/ydY1qAF8kIJO6QYJtw9SShv1cLvvYmcgK7mhhqDPgXLibgrQk9ZsN4Qjtx0etXnvtTGfn+rNUKRLG9yGx9kZMijoBF27RpL5xRoG18gdGmnmCu2cYmSeG80+g6GtK2ui31pcFCpnhru9hDnyO3Dwy6hwn6MTW2SKw0uACvqaUUvZL0vPaiYvbXt+n+yayst0s6ZLURg9svhtvuZmElChi1uf68GvvXTe37CDxpUoUr53Ae3Vwl+RZWHh3Jofq4ZaRIfEEbdz34sXHX3y8hsjuPp7p6cCMfLvIlwJP4xjV4nGexlE8J1e3P4BkQgrtlmfl7u4JPW/W07T5QmNerP7F6muI7G717kXQgTh9xVAbVL5+ttbtdY8/Luv4sqd8hL+eXI7+otqXatmFYGPPC2uCGJOEi4nfcPz6uFuFr3Hua+UP/LP/6lizc3XS/mrPXgUuDKkxsurorf5hEVcPMuE+bq9DF9m4V39WAFLAFy5COa8rdpt7Z5O84ZHvQdl4NgfK8SaEswOAe0U4821u1YwlfMW/oaFgLtXMCleT6W12mAY+27i04OGxgMcCmkwrDmPkUY/JtPZ9dktb3AruI/IIXBJSzW/p6/4wAI9QOm+OLn0zOqWbLXAPD7i+uVlzqyqApzj6npEi7f7AY9UImv+oOaIpcqg/m4G2QoRWZzQ7cLloYJFH33jm5I53HkIxn9250X1oJsbvB6CYC/yes2k4xoND13kN7hyvIGMtufe6x8VIppVfMM1y2PZU1kTi/uzyn1r25WdvLpiM7S3HxWweiFDJJKn9ZdD++LodIp9phSxBNqO6OcmxkVJS7XeyvglaFt7ejttD8gseoVybMVWFLFPzouQXJT8JJf8XAAD//5wYkBM=" + return 
"eJzsWl1v27gSfc+vGAQX6L1t49wFin3wQwGvm2IDbNIgSbePypga21xLpEpScd3d/e8LUpIl68OWZcfrFNGjRM6cMx+HEqkzmNGiDzjXY1QTNHQCYLgJqA+ngy938DG5e3oC4JNmikeGS9GH9ycAAA/5vAcIpR8HBEwGATGjwU5PH0JIRnGmYaxkCAb1zN5BHw0CCT+SXJjeCYCigFBTH0Zk8ARgzCnwdd/5OgOBIZWg2sssIurDRMk4Su8UpxWnMikMckGqF+CIAt1jMvQwxO9S4Fx7xLTHglgbUsupmfkZLeZS+YX7K+G4GN7BMJnqfHVynQ05W7GwBYKlhc4YbGrOUIku3l1aB7fXnf36NOaCW4NnYwx5sOiMIrcEJUs7YHokpbnsHpoCqNRUMrqCzY72tEGjK76KVV7x9L7wAOAhN/OQ0dVgpnkzmikamJMi0ExhRH7SnsW+dcCdDfjPnxfDO2/46fp+cHl9cetdXdwPPgzuB97n20vv93d/n9ux58nY2ubOrnKTZ1e5a1fSlfSWV+qMdUmohKfYn/DfGx5FpOT/at250O3ia5n0ipHMBfdJGD7mK1qzlY/3pYcAw2X/59YBmZJaOzQaUPhZLPVbmE85mwJ9jTHQYGShKVyM3sCr81fwpnCb+73mePmkuSLflVys98fqfkqQ2obENoylcpXsQpwUrWtdG/c1CGdCzsWT4HOWu6MLKZRq4U1R+V7AQ25q0WmGAfneOJBYHtAC4q+ofLhyjuA366MDThbFtcjKslSBdBsLw0OC4c3nTH56pfF17V/0PSMlKOhFrMy9VYCcB6lCNH2ISDESdSM2hNFeN8lknBDIMThSXKTgQEfI6C3Qt0iR1uQDakB4xCAmq3ZzIgH/d034U5l+haiQKnwGbMHixIB/Jx9GC1dPIg5HpOwEm24mFek9xcRwNit3bh6QQIpJN5oWp7NdZteMSC+0oaNLkDQYpGySVNl8JFB3TUFK+BjLcg3rw5RnGpunLM+UjnPRDCTWpI4tO2lOLLT9CKQjeYx1WGF6mOpz8TiINObMmtG4bvzXM3O/1IRY42TnkktIHUXNlZg9XYll3H2uZ1x2e+/7wPUMLs8/dX3pU4Tlt/H13lsGccBYHMYBGvKdDw1+rLiYuPgFfJz085Tyb5+6dDaALxKQUV1TwqbGbEnDXtfLbHcmksMdLQx1BpyV9zojLUn9Yk04QrvxUfk24dZ0to4/i5UiYdI8RFYqiUlRV8CFVwdSj5yRZxePJ0aaKIdbpYzMHMPlJ1D0NSZt9Fur8QKFTHDX5zADPkduDow6wwk6snG2CGxpcAFfY4op+fJOeW3FxU2vz9O+ieTinDhdklqrgc0r+4Ylx6dIEbM614efe++6qWWHEl9WieK1HbhXBXdOfggJ787kWDXcMjIknqGMp7l40fEXHa8hsr2OJ/V0ZEK+uciXBR6HIarFYd7G7efIj6Pq9gNIRqTQTvmh1N29oWfJep4yX0jMi9S/SH0Nke2l3m0XHYnSVwS1ocpXzyK7bfekx4sdN3vKvzysOpejP6h26y154K3NeWGMF2IUcTFJJ5y+Pu0W4Vucp7FKf5BI/m2yYufipNOnPfsUuDCkxsiqrZf/kRJWD35hH8vr0Fk2boPQFoAU8IULX87rgt1m7WwqbzjwGpS0Z7OhDG9EODsCuDeEszTNrZKxhK/4Ixry5lLNbOFqMr31CtPAZxOXFjxSLJBiAU2mFYcx8qDHZFy7691SFjeC+4g8AOeEVPNeft0PFnCA0KXi6Nw3o1O6WQJ30IDbu7sVtaoCeI6tnzJSpN0PT7YaQfPvNadTRQ71JzjQthCh1UnOFlyuGlhk1vdwfuWOip6irj67M6h9VFaI346grq7wW8bGxWx9JR1lN9TgzvAKMla4e697XIxkXPnOaS6HTe9uTST2J6p/1bIvv6FzwWRoFyZns7khfCWjqPb7of2BeTtEqaccWYRsRnV9kmEjpaTa7Sx/HbTEvF2020NKBxwgXOsxVQtZxualkl8q+VlU8j8BAAD//26nA2k=" } diff --git a/x-pack/metricbeat/module/awsfargate/task_stats/_meta/fields.yml b/x-pack/metricbeat/module/awsfargate/task_stats/_meta/fields.yml index b79be57b70c..e9d6a6a5be9 100644 --- a/x-pack/metricbeat/module/awsfargate/task_stats/_meta/fields.yml +++ b/x-pack/metricbeat/module/awsfargate/task_stats/_meta/fields.yml @@ -34,12 +34,12 @@ type: scaled_float format: percent description: > - Percentage of time in kernel space. + Percentage of time in kernel space, expressed as a value between 0 and 1. - name: kernel.norm.pct type: scaled_float format: percent description: > - Percentage of time in kernel space normalized by the number of CPU cores. + Percentage of time in kernel space normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: kernel.ticks type: long description: > @@ -48,12 +48,12 @@ type: scaled_float format: percent description: > - Percentage of total CPU time in the system. + Percentage of total CPU time in the system, expressed as a value between 0 and 1. - name: system.norm.pct type: scaled_float format: percent description: > - Percentage of total CPU time in the system normalized by the number of CPU cores. + Percentage of total CPU time in the system normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: system.ticks type: long description: > @@ -62,12 +62,12 @@ type: scaled_float format: percent description: > - Percentage of time in user space. 
+ Percentage of time in user space, expressed as a value between 0 and 1. - name: user.norm.pct type: scaled_float format: percent description: > - Percentage of time in user space normalized by the number of CPU cores. + Percentage of time in user space normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: user.ticks type: long description: > @@ -76,12 +76,12 @@ type: scaled_float format: percent description: > - Total CPU usage. + Total CPU usage, expressed as a value between 0 and 1. - name: total.norm.pct type: scaled_float format: percent description: > - Total CPU usage normalized by the number of CPU cores. + Total CPU usage normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: diskio type: group description: Disk I/O metrics. @@ -244,7 +244,7 @@ type: scaled_float format: percent description: > - Memory resident set size percentage. + Memory resident set size percentage, expressed as a value between 0 and 1. - name: usage type: group description: > diff --git a/x-pack/metricbeat/module/containerd/cpu/_meta/fields.yml b/x-pack/metricbeat/module/containerd/cpu/_meta/fields.yml index e2a99f3d432..3e3a1d24caa 100644 --- a/x-pack/metricbeat/module/containerd/cpu/_meta/fields.yml +++ b/x-pack/metricbeat/module/containerd/cpu/_meta/fields.yml @@ -36,17 +36,17 @@ type: scaled_float format: percent description: > - Percentage of total CPU time normalized by the number of CPU cores + Percentage of total CPU time normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: kernel.pct type: scaled_float format: percent description: > - Percentage of time in kernel space normalized by the number of CPU cores. + Percentage of time in kernel space normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: user.pct type: scaled_float format: percent description: > - Percentage of time in user space normalized by the number of CPU cores. + Percentage of time in user space normalized by the number of CPU cores, expressed as a value between 0 and 1. - name: cpu.*.ns type: object object_type: double diff --git a/x-pack/metricbeat/module/containerd/fields.go b/x-pack/metricbeat/module/containerd/fields.go index 9cc2c58832f..a7506778b51 100644 --- a/x-pack/metricbeat/module/containerd/fields.go +++ b/x-pack/metricbeat/module/containerd/fields.go @@ -19,5 +19,5 @@ func init() { // AssetContainerd returns asset data. // This is the base64 encoded zlib format compressed contents of module/containerd. 
func AssetContainerd() string { - return "eJzUmE1v8zYMx+/5FEQvAwY0u+cwYC1QoBi6Fdt6LmSZTrXoxdBL0/TTP5Bsp44t20qeBnF0KAq/kD/Sf5JSbmGDuxVQJS1hEnW+ALDMclzBzf3+4s0CQCNHYnAFGVqyAMjRUM1Ky5Rcwe8LAICvF8BYYg1QxTlSizkUWolDLwVDnptVePEWJBHYwfDL7kpcwVorV9ZXIm79epSF0oL4y0Bk5Z8Zy6gBkilnW6Z/MaCdlEyuvy6aZW2pTdUm839NSSju7zRwG9xt1R54BLGTob7FxlfGN0z1/LST4Ff3eyR4v+OKbuDxt79BoNWM7qOORd4myvGdHYQ+Fv4EhF9/EYGgCsgCUMR641gj6ZqOZyPB6R+UOuE48XL0dg3kTnsV2DcEzopA5P/fq6JjIpahNqwqTe9ew8uVXEduTiCHXDmRofZsJ0G3ZLWzeDRgVVXDLycEcOdfDfDHsTfcW81sXHs/KYJg+OpUcBr1XGTg6S3K05RgnBBE787XEPzkuE5V+KauStRhAl6tOkKTaD7CUTLpDVFauskRmjao/3HSMoFw//wSm5tDc3hsnpqdsSiWVlnCo2rOlct4t+lNpPE/bw2cQV3tgIKPQB3wTYnSApNgkCqZH8TwReYMWac32ynVb1BL7EY4bnLMbNu0jAkNxhPYrAQ9+uVT92eIoEoLSCJVnb3BkH3+rzngF6+f9HBjEr6qeG1dNUcFvCypHQzaUMIxfy24IrGHmmZZoqYoY08k0D9XL3to3xJDDPtKl94DZ5+YQ7YL/VLu54R/iCodadOHNTu3CH1cTNZwEM5NaXEuRyt1pmGGJv4tQdLSLX9dRmupClJl/2M0B9WN14lqSwjRU/bqy0dp35gJgL3hLVCog33etx2Be5bHxvRW6Q2Ta4M2opNJjYzrYyJxT4GzIQCDtrFD1hgf3dp0v/Lgpm1sw5a0y6jSCBoNy/22wvMZ9jlARqhl7/jAeM/Z+Qgrn1AwjpWROBol9C2+3zkHVPA2hsPkJXLVeJ3M1nG7wwnfL6En1EoKvxd229jU9lKQjwscVZ7IR0Md8jHcfC89XOpK4FzRcMitqYcaCSTv686X3IP2MpHegjC+pMpF05OU5QSgB8I4BCeoh1E4E2yY4oxabCWqgogXbvQgdnLl1oei6yvdBjy5gi9VBjWonVs11FzFrIviUJ4jVWG2pKv8k2vi3y0pr7AiKuzZ10PAnF01BKp510Jblp1KWPwIAAD//wv4XH8=" + return "eJzUmMtu6zYQhvd+isHZFCgat916UaAJECAo0gZtsw4oauSwpkiBlzjO0xdDSY4sURe7dSxrcXBgWTPf/P5nOMoNbHC3Aq6VY0KhSRcATjiJK/h2t//w2wLAoERmcQUJOrYASNFyIwontFrBLwsAgM8HwDrmLHAtJXKHKWRG54dZMoEytavw4A0olmMLgy63K3AFa6N9UX0SSUvXg8q0yRl9DEyV+YV1gltgifauEfo7C8YrJdT680O7rCI1qZpk9K8tGMf9nRpug7ut3gMPILYU6kascyVyI3QnT1MEutq/x4Tst1LzDTz8+Afk6Izg+6pjlTeJUnwTB6UPlT8CQdfvLEfQGSQBKBK9TmyQtUPH1ZiQ9FfOfe4lIztSXAupN+QC94ogRRaI6P97V7RCxBRqwurCdu7VvFKrdeTmCHLQyucJGmI7Cbphq53DowHLrup/eEIBt/RogD+OvebeGuHi3vuPJgiBr84Fp1HPxQZE71Cd5gTr85yZ3fkGAp0c1+kKGuq6QBNOwKt1RxgS9Y9wlE06hygv/OgROu2g/tMrJ3KEu6fn2LnZdw4Pnad2Zx3mS6cdk1E3p9onsj30RmT8m6KBt2jKDSjkCNQB3xaoHAgFFrlW6UENn2TesvX0YTvm+g0ahe0Kh0MOhW2GVjGjwbCA9TXBj3SRdL+FCkpZQDGlK/V6Syb9r7ngZ/LP9HJjFr6qel3VNUcVvCy46y3aciYxfcmkZrEv1cOyQMNRxb4xgf6pfJigaSSGGvadriiDFB+YQrIL81Ltzwn6EtcG7Q+A74VBazEFZoHBG5MeaYhtERX8FGbIz8uR1p6bEFS+UBUchNerr5GD2n6mYoQT4Qul4IVffr+Mtm8phU7+wahS5Y2XkQafIATV0mlp0sK9ChsAO/tCjrk+WC3/t7fuTuShzWCrzUaotUUXcdOok4ZdNCLcY+CsCcCiq+OwNca3BWPbv3Lvnji0I05abEoZwaAVKW0yxGfFRw8Z40684b2QnWTnIyxzQiYklkHiaJzx1/iKdQ6okG0IR6hLaFVnHVXruIV0JPdzmAmVk8KfKNtjbGyjzdn7Bd6OHtl7TR306B++lz6Cqk6QUvPwXl1R9w0SmLxKnk/cg/EyIm/GhFxy7aPyTFJ5AtA9ExJCEjT9KFLkop/ijF5sCFVCxBs3+u53cudW72HX17o1+OQOvlQbVKBubt1QcWWzbopDew50hd2ytvNP7om/tqy4wo4osWffDwFzdt0QqObdC01btjph8W8AAAD//wJ7gxo=" } diff --git a/x-pack/osquerybeat/main_test.go b/x-pack/osquerybeat/main_test.go index f9ed09ee1a8..30a9b88efb6 100644 --- a/x-pack/osquerybeat/main_test.go +++ b/x-pack/osquerybeat/main_test.go @@ -10,6 +10,7 @@ import ( "flag" "testing" + "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/beats/v7/x-pack/osquerybeat/cmd" ) @@ -20,11 +21,14 @@ func init() { systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } // Test started when the test binary is started. Only calls main. 
 func TestSystem(_ *testing.T) {
+	cfgfile.ConvertFlagsForBackwardsCompatibility()
 	if *systemTest {
 		main()
 	}
diff --git a/x-pack/packetbeat/main_test.go b/x-pack/packetbeat/main_test.go
index 234d68f8169..a332f5c9935 100644
--- a/x-pack/packetbeat/main_test.go
+++ b/x-pack/packetbeat/main_test.go
@@ -8,6 +8,7 @@ import (
 	"flag"
 	"testing"

+	"github.com/elastic/beats/v7/libbeat/cfgfile"
 	"github.com/elastic/beats/v7/libbeat/tests/system/template"
 	"github.com/elastic/beats/v7/x-pack/packetbeat/cmd"
 )
@@ -18,11 +19,14 @@ func init() {
 	testing.Init()
 	systemTest = flag.Bool("systemTest", false, "Set to true when running system tests")
 	cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest"))
+	cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest")
 	cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile"))
+	cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile")
 }

 // Test started when the test binary is started. Only calls main.
 func TestSystem(t *testing.T) {
+	cfgfile.ConvertFlagsForBackwardsCompatibility()
 	if *systemTest {
 		main()
 	}
diff --git a/x-pack/winlogbeat/cmd/export.go b/x-pack/winlogbeat/cmd/export.go
index 54f9f02e18c..facd2fb0e92 100644
--- a/x-pack/winlogbeat/cmd/export.go
+++ b/x-pack/winlogbeat/cmd/export.go
@@ -10,6 +10,7 @@ import (

 	"github.com/spf13/cobra"

+	"github.com/elastic/beats/v7/libbeat/cfgfile"
 	"github.com/elastic/beats/v7/libbeat/cmd/instance"
 	"github.com/elastic/beats/v7/winlogbeat/module"
 	libversion "github.com/elastic/elastic-agent-libs/version"
@@ -48,7 +49,9 @@ func GenExportPipelineCmd(settings instance.Settings) *cobra.Command {
 	}

 	genExportPipelineCmd.Flags().String("es.version", settings.Version, "Elasticsearch version (required)")
+	cfgfile.AddAllowedBackwardsCompatibleFlag("es.version")
 	genExportPipelineCmd.Flags().String("dir", "", "Specify directory for exporting pipelines. Default is current directory.")
+	cfgfile.AddAllowedBackwardsCompatibleFlag("dir")

 	return genExportPipelineCmd
 }
diff --git a/x-pack/winlogbeat/main_test.go b/x-pack/winlogbeat/main_test.go
index 2b8547a8172..b4c0f3b124a 100644
--- a/x-pack/winlogbeat/main_test.go
+++ b/x-pack/winlogbeat/main_test.go
@@ -8,6 +8,7 @@ import (
 	"flag"
 	"testing"

+	"github.com/elastic/beats/v7/libbeat/cfgfile"
 	"github.com/elastic/beats/v7/libbeat/tests/system/template"
 	"github.com/elastic/beats/v7/x-pack/winlogbeat/cmd"
 )
@@ -18,11 +19,14 @@ func init() {
 	testing.Init()
 	systemTest = flag.Bool("systemTest", false, "Set to true when running system tests")
 	cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest"))
+	cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest")
 	cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile"))
+	cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile")
 }

 // Test started when the test binary is started. Only calls main.
 func TestSystem(*testing.T) {
+	cfgfile.ConvertFlagsForBackwardsCompatibility()
 	if *systemTest {
 		main()
 	}
diff --git a/x-pack/winlogbeat/module/testing.go b/x-pack/winlogbeat/module/testing.go
index 3dc628b80a9..f1d38fceac8 100644
--- a/x-pack/winlogbeat/module/testing.go
+++ b/x-pack/winlogbeat/module/testing.go
@@ -5,6 +5,7 @@
 package module

 import (
+	"context"
 	"encoding/json"
 	"flag"
 	"fmt"
@@ -105,7 +106,9 @@ func testIngestPipeline(t *testing.T, pipeline, pattern string, p *params) {
 	}
 	defer conn.Close()

-	err = conn.Connect()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	err = conn.Connect(ctx)
 	if err != nil {
 		t.Fatalf("unexpected error making connection: %v", err)
 	}
diff --git a/x-pack/winlogbeat/module/wintest/docker_test.go b/x-pack/winlogbeat/module/wintest/docker_test.go
index e45826f3b08..db7ab341a27 100644
--- a/x-pack/winlogbeat/module/wintest/docker_test.go
+++ b/x-pack/winlogbeat/module/wintest/docker_test.go
@@ -82,7 +82,9 @@ func TestDocker(t *testing.T) {
 	}
 	defer conn.Close()

-	err = conn.Connect()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	err = conn.Connect(ctx)
 	if err != nil {
 		t.Fatalf("unexpected error making connection: %v", err)
 	}
diff --git a/x-pack/winlogbeat/module/wintest/simulate_test.go b/x-pack/winlogbeat/module/wintest/simulate_test.go
index 1bda1d5fb17..b54d12f1d96 100644
--- a/x-pack/winlogbeat/module/wintest/simulate_test.go
+++ b/x-pack/winlogbeat/module/wintest/simulate_test.go
@@ -11,6 +11,7 @@
 package wintest_test

 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"os"
@@ -72,7 +73,9 @@ func TestSimulate(t *testing.T) {
 	}
 	defer conn.Close()

-	err = conn.Connect()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	err = conn.Connect(ctx)
 	if err != nil {
 		t.Fatalf("unexpected error making connection: %v", err)
 	}
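Taken together, the test updates above all apply one pattern: derive a cancellable context, defer the cancellation, and pass the context to Connect. A generic sketch of that shape is below; the Connector interface is illustrative and stands in for the concrete Elasticsearch connection type used by these tests.

package example

import (
	"context"
	"fmt"
)

// Connector is illustrative only; it captures the Connect(ctx)/Close shape
// that the updated tests rely on.
type Connector interface {
	Connect(ctx context.Context) error
	Close() error
}

// runWithConnection mirrors the test pattern: tie the connection's lifetime
// to a cancellable context, defer the cancellation, connect, then work.
func runWithConnection(conn Connector, work func() error) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	defer conn.Close()

	if err := conn.Connect(ctx); err != nil {
		return fmt.Errorf("unexpected error making connection: %w", err)
	}
	return work()
}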