diff --git a/.buildkite/auditbeat/auditbeat-pipeline.yml b/.buildkite/auditbeat/auditbeat-pipeline.yml
index 62c0d86e259a..798939bbf324 100644
--- a/.buildkite/auditbeat/auditbeat-pipeline.yml
+++ b/.buildkite/auditbeat/auditbeat-pipeline.yml
@@ -134,4 +134,4 @@ steps:
     steps:
       - label: Package pipeline
-        commands: ".buildkite/auditbeat/scripts/package-step.sh | buildkite-agent pipeline upload"
+        commands: ".buildkite/auditbeat/scripts/package-step.sh"
diff --git a/.buildkite/auditbeat/scripts/package-step.sh b/.buildkite/auditbeat/scripts/package-step.sh
index 6c113f4e3cae..cb06895879ac 100755
--- a/.buildkite/auditbeat/scripts/package-step.sh
+++ b/.buildkite/auditbeat/scripts/package-step.sh
@@ -13,7 +13,7 @@ changeset="^auditbeat/
 ^\.buildkite/auditbeat/"
 
 if are_files_changed "$changeset"; then
-  cat <<-YAML
+  bk_pipeline=$(cat <<-YAML
   steps:
     - label: ":ubuntu: Packaging Linux X86"
       key: "package-linux-x86"
@@ -43,6 +43,8 @@ if are_files_changed "$changeset"; then
           imagePrefix: "${IMAGE_UBUNTU_ARM_64}"
           instanceType: "t4g.large"
 YAML
+)
+  echo "${bk_pipeline}" | buildkite-agent pipeline upload
 else
   buildkite-agent annotate "No required files changed. Skipped packaging" --style 'warning' --context 'ctx-warning'
   exit 0
diff --git a/.buildkite/filebeat/filebeat-pipeline.yml b/.buildkite/filebeat/filebeat-pipeline.yml
index f86ad7da11d4..e811d286953c 100644
--- a/.buildkite/filebeat/filebeat-pipeline.yml
+++ b/.buildkite/filebeat/filebeat-pipeline.yml
@@ -137,4 +137,4 @@ steps:
     steps:
       - label: Package pipeline
-        commands: ".buildkite/filebeat/scripts/package-step.sh | buildkite-agent pipeline upload"
+        commands: ".buildkite/filebeat/scripts/package-step.sh"
diff --git a/.buildkite/filebeat/scripts/package-step.sh b/.buildkite/filebeat/scripts/package-step.sh
index c2a1bf41a01d..f8fa02db81d5 100755
--- a/.buildkite/filebeat/scripts/package-step.sh
+++ b/.buildkite/filebeat/scripts/package-step.sh
@@ -13,7 +13,7 @@ changeset="^filebeat/
 ^\.buildkite/filebeat/"
 
 if are_files_changed "$changeset"; then
-  cat <<-YAML
+  bk_pipeline=$(cat <<-YAML
   steps:
     - label: ":ubuntu: Packaging Linux X86"
      key: "package-linux-x86"
@@ -43,6 +43,8 @@ if are_files_changed "$changeset"; then
           imagePrefix: "${IMAGE_UBUNTU_ARM_64}"
           instanceType: "t4g.large"
 YAML
+)
+  echo "${bk_pipeline}" | buildkite-agent pipeline upload
 else
   buildkite-agent annotate "No required files changed. Skipped packaging" --style 'warning' --context 'ctx-warning'
   exit 0
diff --git a/.buildkite/heartbeat/heartbeat-pipeline.yml b/.buildkite/heartbeat/heartbeat-pipeline.yml
index 93cbfc32008f..bf645a2b295b 100644
--- a/.buildkite/heartbeat/heartbeat-pipeline.yml
+++ b/.buildkite/heartbeat/heartbeat-pipeline.yml
@@ -139,4 +139,4 @@ steps:
     steps:
       - label: Package pipeline
-        commands: ".buildkite/heartbeat/scripts/package-step.sh | buildkite-agent pipeline upload"
+        commands: ".buildkite/heartbeat/scripts/package-step.sh"
diff --git a/.buildkite/heartbeat/scripts/package-step.sh b/.buildkite/heartbeat/scripts/package-step.sh
index 05ef69b131ad..03790edfa5f9 100755
--- a/.buildkite/heartbeat/scripts/package-step.sh
+++ b/.buildkite/heartbeat/scripts/package-step.sh
@@ -13,7 +13,7 @@ changeset="^heartbeat/
 ^\.buildkite/heartbeat/"
 
 if are_files_changed "$changeset"; then
-  cat <<-YAML
+  bk_pipeline=$(cat <<-YAML
   steps:
     - label: ":ubuntu: Packaging Linux X86"
       key: "package-linux-x86"
@@ -43,6 +43,8 @@ if are_files_changed "$changeset"; then
           imagePrefix: "${IMAGE_UBUNTU_ARM_64}"
           instanceType: "t4g.large"
 YAML
+)
+  echo "${bk_pipeline}" | buildkite-agent pipeline upload
 else
   buildkite-agent annotate "No required files changed. Skipped packaging" --style 'warning' --context 'ctx-warning'
   exit 0
diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command
index cf66e4edf6e0..4dda7a884dda 100644
--- a/.buildkite/hooks/pre-command
+++ b/.buildkite/hooks/pre-command
@@ -12,8 +12,7 @@ if [[ "$BUILDKITE_PIPELINE_SLUG" == "filebeat" || "$BUILDKITE_PIPELINE_SLUG" ==
   fi
 fi
 
-
-if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" || "$BUILDKITE_PIPELINE_SLUG" == "beats-libbeat" || "$BUILDKITE_PIPELINE_SLUG" == "beats-packetbeat" ]]; then
+if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" || "$BUILDKITE_PIPELINE_SLUG" == "beats-libbeat" || "$BUILDKITE_PIPELINE_SLUG" == "beats-packetbeat" || "$BUILDKITE_PIPELINE_SLUG" == "beats-winlogbeat" ]]; then
   source .buildkite/scripts/setenv.sh
   if [[ "${BUILDKITE_COMMAND}" =~ ^buildkite-agent ]]; then
     echo "Skipped pre-command when running the Upload pipeline"
diff --git a/.buildkite/libbeat/pipeline.libbeat.yml b/.buildkite/libbeat/pipeline.libbeat.yml
index cfaf78183609..8d57db531b2f 100644
--- a/.buildkite/libbeat/pipeline.libbeat.yml
+++ b/.buildkite/libbeat/pipeline.libbeat.yml
@@ -39,7 +39,3 @@ steps:
   - label: ":linux: Load dynamic Libbeat pipeline"
     key: "libbeat-pipeline"
     command: ".buildkite/scripts/generate_libbeat_pipeline.sh"
-    agents:
-      provider: "gcp"
-      image: "${IMAGE_UBUNTU_X86_64}"
-      machineType: "${GCP_DEFAULT_MACHINE_TYPE}"
diff --git a/.buildkite/metricbeat/pipeline.yml b/.buildkite/metricbeat/pipeline.yml
index d882edaaa0a1..59279df2d549 100644
--- a/.buildkite/metricbeat/pipeline.yml
+++ b/.buildkite/metricbeat/pipeline.yml
@@ -8,7 +8,6 @@ env:
   IMAGE_WIN_2019: "family/core-windows-2019"
   IMAGE_WIN_2022: "family/core-windows-2022"
   IMAGE_MACOS_X86_64: "generic-13-ventura-x64"
-  GO_AGENT_IMAGE: "golang:${GO_VERSION}"
   BEATS_PROJECT_NAME: "metricbeat"
   GCP_DEFAULT_MACHINE_TYPE: "c2d-highcpu-8"
   GCP_HI_PERF_MACHINE_TYPE: "c2d-highcpu-16"
@@ -46,7 +45,3 @@ steps:
   - label: ":linux: Load dynamic metricbeat pipeline"
     key: "metricbeat-pipeline"
     command: ".buildkite/scripts/generate_metricbeat_pipeline.sh"
-    agents:
-      provider: "gcp"
-      image: "${IMAGE_UBUNTU_X86_64}"
-      machineType: "${GCP_DEFAULT_MACHINE_TYPE}"
diff --git a/.buildkite/packetbeat/pipeline.packetbeat.yml b/.buildkite/packetbeat/pipeline.packetbeat.yml
index 0dbb87fc1d10..648548db7b6e 100644
--- a/.buildkite/packetbeat/pipeline.packetbeat.yml
+++ b/.buildkite/packetbeat/pipeline.packetbeat.yml
@@ -53,6 +53,3 @@ steps:
   - label: ":linux: Load dynamic packetbeat pipeline"
     key: "packetbeat-pipeline"
     command: ".buildkite/scripts/generate_packetbeat_pipeline.sh"
-    agents:
-      provider: "gcp"
-      image: "${IMAGE_UBUNTU_X86_64}"
diff --git a/.buildkite/scripts/common.sh b/.buildkite/scripts/common.sh
index f51097091f4a..d6566de6e875 100755
--- a/.buildkite/scripts/common.sh
+++ b/.buildkite/scripts/common.sh
@@ -7,11 +7,12 @@ platform_type="$(uname)"
 platform_type_lowercase=$(echo "$platform_type" | tr '[:upper:]' '[:lower:]')
 arch_type="$(uname -m)"
 GITHUB_PR_TRIGGER_COMMENT=${GITHUB_PR_TRIGGER_COMMENT:-""}
+GITHUB_PR_LABELS=${GITHUB_PR_LABELS:-""}
 ONLY_DOCS=${ONLY_DOCS:-"true"}
-
 [ -z "${runLibbeat+x}" ] && runLibbeat="$(buildkite-agent meta-data get runLibbeat --default ${runLibbeat:-"false"})"
 [ -z "${runMetricbeat+x}" ] && runMetricbeat="$(buildkite-agent meta-data get runMetricbeat --default ${runMetricbeat:-"false"})"
 [ -z "${runPacketbeat+x}" ] && runPacketbeat="$(buildkite-agent meta-data get runPacketbeat --default ${runPacketbeat:-"false"})"
+[ -z "${runWinlogbeat+x}" ] && runWinlogbeat="$(buildkite-agent meta-data get runWinlogbeat --default ${runWinlogbeat:-"false"})"
 [ -z "${runLibBeatArmTest+x}" ] && runLibBeatArmTest="$(buildkite-agent meta-data get runLibbeat --default ${runLibbeat:-"false"})"
 [ -z "${runPacketbeatArmTest+x}" ] && runPacketbeatArmTest="$(buildkite-agent meta-data get runPacketbeatArmTest --default ${runPacketbeatArmTest:-"false"})"
 [ -z "${runMetricbeatMacOsTests+x}" ] && runMetricbeatMacOsTests="$(buildkite-agent meta-data get runMetricbeatMacOsTests --default ${runMetricbeatMacOsTests:-"false"})"
@@ -29,6 +30,10 @@ packetbeat_changeset=(
   "^packetbeat/.*"
   )
 
+winlogbeat_changeset=(
+  "^winlogbeat/.*"
+  )
+
 oss_changeset=(
   "^go.mod"
   "^pytest.ini"
@@ -231,18 +236,23 @@ are_changed_only_paths() {
 are_conditions_met_mandatory_tests() {
   if are_paths_changed "${oss_changeset[@]}" || are_paths_changed "${ci_changeset[@]}"; then
     # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L3-L12
-    if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" ]]; then
-      if are_paths_changed "${metricbeat_changeset[@]}" || [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test metricbeat" || "${GITHUB_PR_LABELS}" =~ Metricbeat || "${runMetricbeat}" == "true" ]]; then
-        return 0
-      fi
-    elif [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-libbeat" ]]; then
-      if are_paths_changed "${libbeat_changeset[@]}" || [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test libbeat" || "${GITHUB_PR_LABELS}" =~ libbeat || "${runLibbeat}" == "true" ]]; then
-        return 0
-      fi
-    elif [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-packetbeat" ]]; then
-      if are_paths_changed "${packetbeat_changeset[@]}" || [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test packetbeat" || "${GITHUB_PR_LABELS}" =~ Packetbeat || "${runPacketbeat}" == "true" ]]; then
-        return 0
-      fi
+    return 0
+  fi
+  if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" ]]; then
+    if are_paths_changed "${metricbeat_changeset[@]}" || [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test metricbeat" || "${GITHUB_PR_LABELS}" =~ Metricbeat || "${runMetricbeat}" == "true" ]]; then
+      return 0
+    fi
+  elif [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-libbeat" ]]; then
+    if are_paths_changed "${libbeat_changeset[@]}" || [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test libbeat" || "${GITHUB_PR_LABELS}" =~ libbeat || "${runLibbeat}" == "true" ]]; then
+      return 0
+    fi
+  elif [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-packetbeat" ]]; then
+    if are_paths_changed "${packetbeat_changeset[@]}" || [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test packetbeat" || "${GITHUB_PR_LABELS}" =~ Packetbeat || "${runPacketbeat}" == "true" ]]; then
+      return 0
+    fi
+  elif [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-winlogbeat" ]]; then
+    if are_paths_changed "${winlogbeat_changeset[@]}" || [[ "${GITHUB_PR_TRIGGER_COMMENT}" == "/test winlogbeat" || "${GITHUB_PR_LABELS}" =~ Winlogbeat || "${runWinlogbeat}" == "true" ]]; then
+      return 0
     fi
   fi
   return 1
@@ -280,14 +290,8 @@ are_conditions_met_macos_tests() {
 are_conditions_met_packaging() {
   if are_conditions_met_mandatory_tests; then
    #from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/Jenkinsfile#L145-L171
-    if [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-metricbeat" ]]; then
-      if [[ "${BUILDKITE_TAG}" == "" || "${BUILDKITE_PULL_REQUEST}" != "" ]]; then    # from https://github.com/elastic/beats/blob/c5e79a25d05d5bdfa9da4d187fe89523faa42afc/metricbeat/Jenkinsfile.yml#L101-L103
-        return 0
-      fi
-    elif [[ "$BUILDKITE_PIPELINE_SLUG" == "beats-packetbeat" ]]; then
-      if [[ "${BUILDKITE_TAG}" == "" || "${BUILDKITE_PULL_REQUEST}" != "" ]]; then
-        return 0
-      fi
+    if [[ "${BUILDKITE_TAG}" == "" || "${BUILDKITE_PULL_REQUEST}" != "" ]]; then
+      return 0
     fi
   fi
   return 1
diff --git a/.buildkite/scripts/generate_winlogbeat_pipeline.sh b/.buildkite/scripts/generate_winlogbeat_pipeline.sh
new file mode 100755
index 000000000000..1eb1b459c921
--- /dev/null
+++ b/.buildkite/scripts/generate_winlogbeat_pipeline.sh
@@ -0,0 +1,109 @@
+#!/usr/bin/env bash
+
+source .buildkite/scripts/common.sh
+
+set -euo pipefail
+
+pipelineName="pipeline.winlogbeat-dynamic.yml"
+
+echo "Add the mandatory and extended tests without additional conditions into the pipeline"
+if are_conditions_met_mandatory_tests; then
+  cat > $pipelineName <<- YAML
+
+steps:
+
+  - group: "Mandatory Tests"
+    key: "mandatory-tests"
+    steps:
+
+      - label: ":negative_squared_cross_mark: Cross compile"
+        key: "mandatory-cross-compile"
+        command: ".buildkite/scripts/crosscompile.sh"
+        agents:
+          provider: "gcp"
+          image: "${IMAGE_UBUNTU_X86_64}"
+          machineType: "${GCP_DEFAULT_MACHINE_TYPE}"
+        artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*"
+
+      - label: ":windows: Windows 2016/2019/2022 Unit Tests - {{matrix.image}}"
+        command: ".buildkite/scripts/win_unit_tests.ps1"
+        key: "mandatory-win-unit-tests"
+        agents:
+          provider: "gcp"
+          image: "{{matrix.image}}"
+          machine_type: "${GCP_WIN_MACHINE_TYPE}"
+          disk_size: 100
+          disk_type: "pd-ssd"
+        matrix:
+          setup:
+            image:
+              - "${IMAGE_WIN_2016}"
+              - "${IMAGE_WIN_2019}"
+              - "${IMAGE_WIN_2022}"
+        artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*"
+
+# echo "Add the extended windows tests into the pipeline"
+# TODO: ADD conditions from the main pipeline
+
+  - group: "Extended Windows Tests"
+    key: "extended-win-tests"
+    steps:
+
+      - label: ":windows: Windows 10 Unit Tests"
+        key: "extended-win-10-unit-tests"
+        command: ".buildkite/scripts/win_unit_tests.ps1"
+        agents:
+          provider: "gcp"
+          image: "${IMAGE_WIN_10}"
+          machine_type: "${GCP_WIN_MACHINE_TYPE}"
+          disk_size: 100
+          disk_type: "pd-ssd"
+        artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*"
+
+      - label: ":windows: Windows 11 Unit Tests"
+        key: "extended-win-11-unit-tests"
+        command: ".buildkite/scripts/win_unit_tests.ps1"
+        agents:
+          provider: "gcp"
+          image: "${IMAGE_WIN_11}"
+          machine_type: "${GCP_WIN_MACHINE_TYPE}"
+          disk_size: 100
+          disk_type: "pd-ssd"
+        artifact_paths: "${BEATS_PROJECT_NAME}/build/*.*"
+YAML
+else
+  echo "The conditions don't meet the requirements for generating pipeline steps."
+  exit 0
+fi
+
+echo "Check and add the Packaging into the pipeline"
+if are_conditions_met_packaging; then
+  cat >> $pipelineName <<- YAML
+
+  - wait: ~
+    depends_on:
+      - step: "mandatory-tests"
+        allow_failure: false
+
+  - group: "Packaging"    # TODO: check conditions for the future main pipeline migration: https://github.com/elastic/beats/pull/28589
+    key: "packaging"
+    steps:
+      - label: ":linux: Packaging Linux"
+        key: "packaging-linux"
+        command: ".buildkite/scripts/packaging.sh"
+        agents:
+          provider: "gcp"
+          image: "${IMAGE_UBUNTU_X86_64}"
+          machineType: "${GCP_HI_PERF_MACHINE_TYPE}"
+        env:
+          PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64"
+
+YAML
+fi
+
+echo "--- Printing dynamic steps"    #TODO: remove if the pipeline is public
+cat $pipelineName
+
+echo "--- Loading dynamic steps"
+buildkite-agent pipeline upload $pipelineName
diff --git a/.buildkite/scripts/win_unit_tests.ps1 b/.buildkite/scripts/win_unit_tests.ps1
index 34833d183ffa..da0ffc105a51 100644
--- a/.buildkite/scripts/win_unit_tests.ps1
+++ b/.buildkite/scripts/win_unit_tests.ps1
@@ -14,9 +14,12 @@ function withChoco {
     Import-Module "$env:ChocolateyInstall\helpers\chocolateyProfile.psm1"
 }
 function withGolang($version) {
-    Write-Host "-- Install golang $version --"
-    choco install -y golang --version=$version
-    refreshenv
+    $downloadPath = Join-Path $env:TEMP "go_installer.msi"
+    $goInstallerUrl = "https://golang.org/dl/go$version.windows-amd64.msi"
+    Invoke-WebRequest -Uri $goInstallerUrl -OutFile $downloadPath
+    Start-Process -FilePath "msiexec.exe" -ArgumentList "/i $downloadPath /quiet" -Wait
+    $goBinPath = "${env:ProgramFiles}\Go\bin"
+    $env:Path += ";$goBinPath"
     go version
 }
 function withPython($version) {
diff --git a/.buildkite/winlogbeat/pipeline.winlogbeat.yml b/.buildkite/winlogbeat/pipeline.winlogbeat.yml
index 34321b61161b..9cea6564ca8d 100644
--- a/.buildkite/winlogbeat/pipeline.winlogbeat.yml
+++ b/.buildkite/winlogbeat/pipeline.winlogbeat.yml
@@ -1,5 +1,35 @@
 # yaml-language-server: $schema=https://raw.githubusercontent.com/buildkite/pipeline-schema/main/schema.json
 
+env:
+  IMAGE_UBUNTU_X86_64: "family/core-ubuntu-2204"
+  IMAGE_WIN_10: "family/general-windows-10"
+  IMAGE_WIN_11: "family/general-windows-11"
+  IMAGE_WIN_2016: "family/core-windows-2016"
+  IMAGE_WIN_2019: "family/core-windows-2019"
+  IMAGE_WIN_2022: "family/core-windows-2022"
+  GCP_DEFAULT_MACHINE_TYPE: "c2d-highcpu-8"
+  GCP_HI_PERF_MACHINE_TYPE: "c2d-highcpu-16"
+  GCP_WIN_MACHINE_TYPE: "n2-standard-8"
+  BEATS_PROJECT_NAME: "winlogbeat"
+
 steps:
-  - label: "Example test"
-    command: echo "Hello!"
+
+  - input: "Input Parameters"
+    key: "input-run-all-stages"
+    fields:
+      - select: "Winlogbeat - runWinlogbeat"
+        key: "runWinlogbeat"
+        options:
+          - label: "True"
+            value: "true"
+          - label: "False"
+            value: "false"
+        default: "false"
+    if: "build.source == 'ui'"
+
+  - wait: ~
+    if: "build.source == 'ui'"
+    allow_dependency_failure: false
+
+  - label: ":linux: Load dynamic winlogbeat pipeline"
+    key: "winlogbeat-pipeline"
+    command: ".buildkite/scripts/generate_winlogbeat_pipeline.sh"
diff --git a/.github/workflows/check-audtibeat.yml b/.github/workflows/check-auditbeat.yml
similarity index 100%
rename from .github/workflows/check-audtibeat.yml
rename to .github/workflows/check-auditbeat.yml
diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 9e27962b7e55..ae82464b5bd5 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -130,6 +130,7 @@ Setting environmental variable ELASTIC_NETINFO:false in Elastic Agent pod will d
 *Auditbeat*
 
 - Add linux capabilities to processes in the system/process. {pull}37453[37453]
+- Add opt-in eBPF backend for file_integrity module. {pull}37223[37223]
 
 *Filebeat*
diff --git a/NOTICE.txt b/NOTICE.txt
index 573e544bb2e8..0bff24902d8d 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -12257,11 +12257,11 @@ SOFTWARE.
 
 --------------------------------------------------------------------------------
 Dependency : github.com/elastic/ebpfevents
-Version: v0.3.2
+Version: v0.4.0
 Licence type (autodetected): Apache-2.0
 --------------------------------------------------------------------------------
 
-Contents of probable licence file $GOMODCACHE/github.com/elastic/ebpfevents@v0.3.2/LICENSE.txt:
+Contents of probable licence file $GOMODCACHE/github.com/elastic/ebpfevents@v0.4.0/LICENSE.txt:
 
 The https://github.com/elastic/ebpfevents repository contains source code under
 various licenses:
diff --git a/auditbeat/.gitignore b/auditbeat/.gitignore
index 3cd551fd5066..7c8dbc055013 100644
--- a/auditbeat/.gitignore
+++ b/auditbeat/.gitignore
@@ -6,4 +6,3 @@ module/*/_meta/config.yml
 /auditbeat
 /auditbeat.test
 /docs/html_docs
-
diff --git a/auditbeat/auditbeat.reference.yml b/auditbeat/auditbeat.reference.yml
index 883760ab410b..bcb150a1dadd 100644
--- a/auditbeat/auditbeat.reference.yml
+++ b/auditbeat/auditbeat.reference.yml
@@ -92,6 +92,11 @@ auditbeat.modules:
   # Auditbeat will ignore files unless they match a pattern.
   #include_files:
   #- '/\.ssh($|/)'
+  # Select the backend which will be used to source events.
+  # "fsnotify" doesn't have the ability to associate user data with file events.
+  # Valid values: auto, fsnotify, kprobes, ebpf.
+  # Default: fsnotify.
+  backend: fsnotify
 
   # Scan over the configured file paths at startup and send events for new or
   # modified files since the last time Auditbeat was running.
diff --git a/auditbeat/docker-compose.yml b/auditbeat/docker-compose.yml
index adf338889883..e0479381efff 100644
--- a/auditbeat/docker-compose.yml
+++ b/auditbeat/docker-compose.yml
@@ -14,11 +14,15 @@ services:
       - KIBANA_PORT=5601
     volumes:
       - ${PWD}/..:/go/src/github.com/elastic/beats/
+      - /sys:/sys
    command: make
    privileged: true
    pid: host
    cap_add:
      - AUDIT_CONTROL
+      - BPF
+      - PERFMON
+      - SYS_RESOURCE
 
 # This is a proxy used to block beats until all services are healthy.
 # See: https://github.com/docker/compose/issues/4369
diff --git a/auditbeat/docs/modules/file_integrity.asciidoc b/auditbeat/docs/modules/file_integrity.asciidoc
index a12c4df47ca0..cc354b6ff85a 100644
--- a/auditbeat/docs/modules/file_integrity.asciidoc
+++ b/auditbeat/docs/modules/file_integrity.asciidoc
@@ -28,8 +28,13 @@ to only send events for new or modified files.
 
 The operating system features that power this feature are as follows.
 
-* Linux - `inotify` is used, and therefore the kernel must have inotify support.
+* Linux - Multiple backends are supported: `auto`, `fsnotify`, `kprobes`, `ebpf`.
+By default, `fsnotify` is used, and therefore the kernel must have inotify support.
 Inotify was initially merged into the 2.6.13 Linux kernel.
+The eBPF backend uses modern eBPF features and supports 5.10.16+ kernels.
+FSNotify doesn't have the ability to associate user data with file events.
+The preferred backend can be selected by specifying the `backend` config option.
+Since eBPF and Kprobes are in technical preview, `auto` will default to `fsnotify`.
 
 * macOS (Darwin) - Uses the `FSEvents` API, present since macOS 10.5. This API
 coalesces multiple changes to a file into a single event. {beatname_uc} translates
 these coalesced changes into a meaningful sequence of actions. However,
@@ -144,6 +149,9 @@ of these directories are watched. If `recursive` is set to `true`, the
 `file_integrity` module will watch for changes on these directories and all
 their subdirectories.
 
+*`backend`*:: (*Linux only*) Select the backend which will be used to
+source events. Valid values: `auto`, `fsnotify`, `kprobes`, `ebpf`. Default: `fsnotify`.
+
 include::{docdir}/auditbeat-options.asciidoc[]
diff --git a/auditbeat/module/file_integrity/_meta/config.yml.tmpl b/auditbeat/module/file_integrity/_meta/config.yml.tmpl
index 588a6279eee2..b3a3784d15c3 100644
--- a/auditbeat/module/file_integrity/_meta/config.yml.tmpl
+++ b/auditbeat/module/file_integrity/_meta/config.yml.tmpl
@@ -55,6 +55,14 @@
   #- '/\.ssh($|/)'
   {{- end }}
 
+  {{- if eq .GOOS "linux" }}
+  # Select the backend which will be used to source events.
+  # "fsnotify" doesn't have the ability to associate user data with file events.
+  # Valid values: auto, fsnotify, kprobes, ebpf.
+  # Default: fsnotify.
+  backend: fsnotify
+  {{- end }}
+
   # Scan over the configured file paths at startup and send events for new or
   # modified files since the last time Auditbeat was running.
   scan_at_start: true
diff --git a/auditbeat/module/file_integrity/_meta/docs.asciidoc b/auditbeat/module/file_integrity/_meta/docs.asciidoc
index 0f32ef64f930..062e966e69b3 100644
--- a/auditbeat/module/file_integrity/_meta/docs.asciidoc
+++ b/auditbeat/module/file_integrity/_meta/docs.asciidoc
@@ -21,8 +21,13 @@ to only send events for new or modified files.
 
 The operating system features that power this feature are as follows.
 
-* Linux - `inotify` is used, and therefore the kernel must have inotify support.
+* Linux - Multiple backends are supported: `auto`, `fsnotify`, `kprobes`, `ebpf`.
+By default, `fsnotify` is used, and therefore the kernel must have inotify support.
 Inotify was initially merged into the 2.6.13 Linux kernel.
+The eBPF backend uses modern eBPF features and supports 5.10.16+ kernels.
+FSNotify doesn't have the ability to associate user data with file events.
+The preferred backend can be selected by specifying the `backend` config option.
+Since eBPF and Kprobes are in technical preview, `auto` will default to `fsnotify`.
 
 * macOS (Darwin) - Uses the `FSEvents` API, present since macOS 10.5. This API
 coalesces multiple changes to a file into a single event. {beatname_uc} translates
 these coalesced changes into a meaningful sequence of actions. However,
@@ -137,4 +142,7 @@ of these directories are watched. If `recursive` is set to `true`, the
 `file_integrity` module will watch for changes on these directories and all
 their subdirectories.
 
+*`backend`*:: (*Linux only*) Select the backend which will be used to
+source events. Valid values: `auto`, `fsnotify`, `kprobes`, `ebpf`. Default: `fsnotify`.
+
 include::{docdir}/auditbeat-options.asciidoc[]
diff --git a/auditbeat/module/file_integrity/config.go b/auditbeat/module/file_integrity/config.go
index e431e6407667..db934b027893 100644
--- a/auditbeat/module/file_integrity/config.go
+++ b/auditbeat/module/file_integrity/config.go
@@ -18,10 +18,12 @@
 package file_integrity
 
 import (
+	"errors"
 	"fmt"
 	"math"
 	"path/filepath"
 	"regexp"
+	"runtime"
 	"sort"
 	"strings"
 
@@ -72,6 +74,25 @@ const (
 	XXH64 HashType = "xxh64"
 )
 
+type Backend string
+
+const (
+	BackendFSNotify Backend = "fsnotify"
+	BackendKprobes  Backend = "kprobes"
+	BackendEBPF     Backend = "ebpf"
+	BackendAuto     Backend = "auto"
+)
+
+func (b *Backend) Unpack(v string) error {
+	*b = Backend(v)
+	switch *b {
+	case BackendFSNotify, BackendKprobes, BackendEBPF, BackendAuto:
+		return nil
+	default:
+		return fmt.Errorf("invalid backend: %q", v)
+	}
+}
+
 // Config contains the configuration parameters for the file integrity
 // metricset.
 type Config struct {
@@ -86,6 +107,7 @@ type Config struct {
 	Recursive    bool            `config:"recursive"` // Recursive enables recursive monitoring of directories.
 	ExcludeFiles []match.Matcher `config:"exclude_files"`
 	IncludeFiles []match.Matcher `config:"include_files"`
+	Backend      Backend         `config:"backend"`
 }
 
 // Validate validates the config data and returns an error explaining all the
@@ -160,6 +182,11 @@ nextHash:
 	if err != nil {
 		errs = append(errs, fmt.Errorf("invalid scan_rate_per_sec value: %w", err))
 	}
+
+	if c.Backend != "" && c.Backend != BackendAuto && runtime.GOOS != "linux" {
+		errs = append(errs, errors.New("backend can only be specified on linux"))
+	}
+
 	return errs.Err()
 }
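Review note on the `Unpack` hook above: go-ucfg resolves a pointer-receiver `Unpack(string) error` method when it assigns a string to a `Backend` field, so a mistyped `backend` value fails while the config is being loaded rather than being silently coerced. A self-contained sketch of the pattern as added in config.go (the `main` driver is illustrative only, not part of the change):

```go
package main

import "fmt"

// Backend mirrors the string-backed enum added in config.go.
type Backend string

const (
	BackendFSNotify Backend = "fsnotify"
	BackendKprobes  Backend = "kprobes"
	BackendEBPF     Backend = "ebpf"
	BackendAuto     Backend = "auto"
)

// Unpack is the validation hook: config libraries such as go-ucfg call it
// when a string value is assigned to a Backend field, so invalid values
// are rejected at unpack time.
func (b *Backend) Unpack(v string) error {
	*b = Backend(v)
	switch *b {
	case BackendFSNotify, BackendKprobes, BackendEBPF, BackendAuto:
		return nil
	default:
		return fmt.Errorf("invalid backend: %q", v)
	}
}

func main() {
	var b Backend
	fmt.Println(b.Unpack("ebpf"))    // <nil>
	fmt.Println(b.Unpack("inotify")) // invalid backend: "inotify"
}
```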
diff --git a/auditbeat/module/file_integrity/event.go b/auditbeat/module/file_integrity/event.go
index fd4d68828a44..c7dfb7032e80 100644
--- a/auditbeat/module/file_integrity/event.go
+++ b/auditbeat/module/file_integrity/event.go
@@ -65,11 +65,14 @@ const (
 	// SourceFSNotify identifies events triggered by a notification from the
 	// file system.
 	SourceFSNotify
+	// SourceEBPF identifies events triggered by an eBPF program.
+	SourceEBPF
 )
 
 var sourceNames = map[Source]string{
 	SourceScan:     "scan",
 	SourceFSNotify: "fsnotify",
+	SourceEBPF:     "ebpf",
 }
 
 // Type identifies the file type (e.g. dir, file, symlink).
@@ -91,12 +94,20 @@ const (
 	FileType
 	DirType
 	SymlinkType
+	CharDeviceType
+	BlockDeviceType
+	FIFOType
+	SocketType
 )
 
 var typeNames = map[Type]string{
-	FileType:    "file",
-	DirType:     "dir",
-	SymlinkType: "symlink",
+	FileType:        "file",
+	DirType:         "dir",
+	SymlinkType:     "symlink",
+	CharDeviceType:  "char_device",
+	BlockDeviceType: "block_device",
+	FIFOType:        "fifo",
+	SocketType:      "socket",
 }
 
 // Digest is an output of a hash function.
@@ -189,36 +200,42 @@ func NewEventFromFileInfo(
 
 	switch event.Info.Type {
 	case FileType:
-		if event.Info.Size <= maxFileSize {
-			hashes, nbytes, err := hashFile(event.Path, maxFileSize, hashTypes...)
-			if err != nil {
-				event.errors = append(event.errors, err)
-				event.hashFailed = true
-			} else if hashes != nil {
-				// hashFile returns nil hashes and no error when:
-				// - There's no hashes configured.
-				// - File size at the time of hashing is larger than configured limit.
-				event.Hashes = hashes
-				event.Info.Size = nbytes
-			}
-
-			if len(fileParsers) != 0 && event.ParserResults == nil {
-				event.ParserResults = make(mapstr.M)
-			}
-			for _, p := range fileParsers {
-				err = p.Parse(event.ParserResults, path)
-				if err != nil {
-					event.errors = append(event.errors, err)
-				}
-			}
-		}
+		fillHashes(&event, path, maxFileSize, hashTypes, fileParsers)
 	case SymlinkType:
-		event.TargetPath, _ = filepath.EvalSymlinks(event.Path)
+		event.TargetPath, err = filepath.EvalSymlinks(event.Path)
+		if err != nil {
+			event.errors = append(event.errors, err)
+		}
 	}
 
 	return event
 }
 
+func fillHashes(event *Event, path string, maxFileSize uint64, hashTypes []HashType, fileParsers []FileParser) {
+	if event.Info.Size <= maxFileSize {
+		hashes, nbytes, err := hashFile(event.Path, maxFileSize, hashTypes...)
+		if err != nil {
+			event.errors = append(event.errors, err)
+			event.hashFailed = true
+		} else if hashes != nil {
+			// hashFile returns nil hashes and no error when:
+			// - There's no hashes configured.
+			// - File size at the time of hashing is larger than configured limit.
+			event.Hashes = hashes
+			event.Info.Size = nbytes
+		}
+
+		if len(fileParsers) != 0 && event.ParserResults == nil {
+			event.ParserResults = make(mapstr.M)
+		}
+		for _, p := range fileParsers {
+			if err = p.Parse(event.ParserResults, path); err != nil {
+				event.errors = append(event.errors, err)
+			}
+		}
+	}
+}
+
 // NewEvent creates a new Event. Any errors that occur are included in the
 // returned Event.
 func NewEvent(
diff --git a/auditbeat/module/file_integrity/event_linux.go b/auditbeat/module/file_integrity/event_linux.go
new file mode 100644
index 000000000000..7643d03a6b42
--- /dev/null
+++ b/auditbeat/module/file_integrity/event_linux.go
@@ -0,0 +1,199 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build linux
+
+package file_integrity
+
+import (
+	"os"
+	"os/user"
+	"path/filepath"
+	"strconv"
+	"time"
+
+	"github.com/elastic/ebpfevents"
+)
+
+// NewEventFromEbpfEvent creates a new Event from an ebpfevents.Event.
+func NewEventFromEbpfEvent(
+	ee ebpfevents.Event,
+	maxFileSize uint64,
+	hashTypes []HashType,
+	fileParsers []FileParser,
+	isExcludedPath func(string) bool,
+) (Event, bool) {
+	var (
+		path, target string
+		action       Action
+		metadata     Metadata
+		err          error
+	)
+	switch ee.Type {
+	case ebpfevents.EventTypeFileCreate:
+		action = Created
+
+		fileCreateEvent := ee.Body.(*ebpfevents.FileCreate)
+		path = fileCreateEvent.Path
+		if isExcludedPath(path) {
+			event := Event{Path: path}
+			return event, false
+		}
+		target = fileCreateEvent.SymlinkTargetPath
+		metadata, err = metadataFromFileCreate(fileCreateEvent)
+	case ebpfevents.EventTypeFileRename:
+		action = Moved
+
+		fileRenameEvent := ee.Body.(*ebpfevents.FileRename)
+		path = fileRenameEvent.NewPath
+		if isExcludedPath(path) {
+			event := Event{Path: path}
+			return event, false
+		}
+		target = fileRenameEvent.SymlinkTargetPath
+		metadata, err = metadataFromFileRename(fileRenameEvent)
+	case ebpfevents.EventTypeFileDelete:
+		action = Deleted
+
+		fileDeleteEvent := ee.Body.(*ebpfevents.FileDelete)
+		path = fileDeleteEvent.Path
+		if isExcludedPath(path) {
+			event := Event{Path: path}
+			return event, false
+		}
+		target = fileDeleteEvent.SymlinkTargetPath
+	case ebpfevents.EventTypeFileModify:
+		fileModifyEvent := ee.Body.(*ebpfevents.FileModify)
+
+		switch fileModifyEvent.ChangeType {
+		case ebpfevents.FileChangeTypeContent:
+			action = Updated
+		case ebpfevents.FileChangeTypePermissions, ebpfevents.FileChangeTypeOwner, ebpfevents.FileChangeTypeXattrs:
+			action = AttributesModified
+		}
+
+		path = fileModifyEvent.Path
+		if isExcludedPath(path) {
+			event := Event{Path: path}
+			return event, false
+		}
+		target = fileModifyEvent.SymlinkTargetPath
+		metadata, err = metadataFromFileModify(fileModifyEvent)
+	}
+
+	event := Event{
+		Timestamp:  time.Now().UTC(),
+		Path:       path,
+		TargetPath: target,
+		Info:       &metadata,
+		Source:     SourceEBPF,
+		Action:     action,
+		errors:     make([]error, 0),
+	}
+	if err != nil {
+		event.errors = append(event.errors, err)
+	}
+
+	if event.Action == Deleted {
+		event.Info = nil
+	} else {
+		switch event.Info.Type {
+		case FileType:
+			fillHashes(&event, path, maxFileSize, hashTypes, fileParsers)
+		case SymlinkType:
+			var err error
+			event.TargetPath, err = filepath.EvalSymlinks(event.Path)
+			if err != nil {
+				event.errors = append(event.errors, err)
+			}
+		}
+	}
+
+	return event, true
+}
+
+func metadataFromFileCreate(evt *ebpfevents.FileCreate) (Metadata, error) {
+	var md Metadata
+	fillExtendedAttributes(&md, evt.Path)
+	err := fillFileInfo(&md, evt.Finfo)
+	return md, err
+}
+
+func metadataFromFileRename(evt *ebpfevents.FileRename) (Metadata, error) {
+	var md Metadata
+	fillExtendedAttributes(&md, evt.NewPath)
+	err := fillFileInfo(&md, evt.Finfo)
+	return md, err
+}
+
+func metadataFromFileModify(evt *ebpfevents.FileModify) (Metadata, error) {
+	var md Metadata
+	fillExtendedAttributes(&md, evt.Path)
+	err := fillFileInfo(&md, evt.Finfo)
+	return md, err
+}
+
+func fillFileInfo(md *Metadata, finfo ebpfevents.FileInfo) error {
+	md.Inode = finfo.Inode
+	md.UID = finfo.Uid
+	md.GID = finfo.Gid
+	md.Size = finfo.Size
+	md.MTime = finfo.Mtime
+	md.CTime = finfo.Ctime
+	md.Type = typeFromEbpfType(finfo.Type)
+	md.Mode = finfo.Mode
+	md.SetUID = finfo.Mode&os.ModeSetuid != 0
+	md.SetGID = finfo.Mode&os.ModeSetgid != 0
+
+	u, err := user.LookupId(strconv.FormatUint(uint64(finfo.Uid), 10))
+	if err != nil {
+		md.Owner = "n/a"
+		md.Group = "n/a"
+		return err
+	}
+	md.Owner = u.Username
+
+	g, err := user.LookupGroupId(strconv.FormatUint(uint64(finfo.Gid), 10))
+	if err != nil {
+		md.Group = "n/a"
+		return err
+	}
+	md.Group = g.Name
+
+	return nil
+}
+
+func typeFromEbpfType(typ ebpfevents.FileType) Type {
+	switch typ {
+	case ebpfevents.FileTypeFile:
+		return FileType
+	case ebpfevents.FileTypeDir:
+		return DirType
+	case ebpfevents.FileTypeSymlink:
+		return SymlinkType
+	case ebpfevents.FileTypeCharDevice:
+		return CharDeviceType
+	case ebpfevents.FileTypeBlockDevice:
+		return BlockDeviceType
+	case ebpfevents.FileTypeNamedPipe:
+		return FIFOType
+	case ebpfevents.FileTypeSocket:
+		return SocketType
+	default:
+		return UnknownType
+	}
+}
diff --git a/auditbeat/module/file_integrity/event_linux_test.go b/auditbeat/module/file_integrity/event_linux_test.go
new file mode 100644
index 000000000000..1a440afb8f17
--- /dev/null
+++ b/auditbeat/module/file_integrity/event_linux_test.go
@@ -0,0 +1,74 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build linux
+
+package file_integrity
+
+import (
+	"os"
+	"os/user"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/elastic/ebpfevents"
+)
+
+func TestNewEventFromEbpfEvent(t *testing.T) {
+	ebpfEvent := ebpfevents.Event{
+		Header: ebpfevents.Header{
+			Type: ebpfevents.EventTypeFileCreate,
+		},
+		Body: &ebpfevents.FileCreate{
+			Finfo: ebpfevents.FileInfo{
+				Type:  ebpfevents.FileTypeFile,
+				Inode: 1234,
+				Mode:  os.FileMode(0o644),
+				Size:  2345,
+				Uid:   3456,
+				Gid:   4567,
+			},
+			Path:              "/foo",
+			SymlinkTargetPath: "/bar",
+		},
+	}
+	expectedEvent := Event{
+		Action:     Created,
+		Path:       "/foo",
+		TargetPath: "/bar",
+		Info: &Metadata{
+			Type:  FileType,
+			Inode: 1234,
+			UID:   3456,
+			GID:   4567,
+			Size:  2345,
+			Owner: "n/a",
+			Group: "n/a",
+			Mode:  os.FileMode(0o644),
+		},
+		Source: SourceEBPF,
+		errors: []error{user.UnknownUserIdError(3456)},
+	}
+
+	event, ok := NewEventFromEbpfEvent(
+		ebpfEvent, 0, []HashType{}, []FileParser{}, func(path string) bool { return false })
+	assert.True(t, ok)
+	event.Timestamp = expectedEvent.Timestamp
+
+	assert.Equal(t, expectedEvent, event)
+}
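The `Owner: "n/a"` and `user.UnknownUserIdError` expectations in the test above follow directly from `fillFileInfo`: the lookup error is recorded on the event, but the metadata still ships with placeholder owner/group names. A self-contained sketch of that stdlib behavior (UID 3456 is assumed to have no passwd entry on the test machine, as in the test):

```go
package main

import (
	"fmt"
	"os/user"
)

func main() {
	// Looking up a UID with no passwd entry returns user.UnknownUserIdError.
	u, err := user.LookupId("3456")
	if err != nil {
		// fillFileInfo records this error on the event but keeps going,
		// substituting "n/a" for the owner and group names.
		fmt.Printf("owner=n/a err=%v\n", err)
		return
	}
	fmt.Println(u.Username)
}
```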
diff --git a/auditbeat/module/file_integrity/eventreader_ebpf.go b/auditbeat/module/file_integrity/eventreader_ebpf.go
new file mode 100644
index 000000000000..2fb452861e84
--- /dev/null
+++ b/auditbeat/module/file_integrity/eventreader_ebpf.go
@@ -0,0 +1,128 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build linux
+
+package file_integrity
+
+import (
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/elastic/beats/v7/libbeat/ebpf"
+	"github.com/elastic/ebpfevents"
+	"github.com/elastic/elastic-agent-libs/logp"
+)
+
+const clientName = "fim"
+
+type ebpfReader struct {
+	watcher *ebpf.Watcher
+	done    <-chan struct{}
+	config  Config
+	log     *logp.Logger
+	eventC  chan Event
+	parsers []FileParser
+	paths   map[string]struct{}
+
+	_records <-chan ebpfevents.Record
+}
+
+func (r *ebpfReader) Start(done <-chan struct{}) (<-chan Event, error) {
+	watcher, err := ebpf.GetWatcher()
+	if err != nil {
+		return nil, err
+	}
+	r.watcher = watcher
+	r.done = done
+
+	mask := ebpf.EventMask(ebpfevents.EventTypeFileCreate | ebpfevents.EventTypeFileRename | ebpfevents.EventTypeFileDelete | ebpfevents.EventTypeFileModify)
+	r._records = r.watcher.Subscribe(clientName, mask)
+
+	go r.consumeEvents()
+
+	r.log.Infow("started ebpf watcher", "file_path", r.config.Paths, "recursive", r.config.Recursive)
+	return r.eventC, nil
+}
+
+func (r *ebpfReader) consumeEvents() {
+	defer close(r.eventC)
+	defer r.watcher.Unsubscribe(clientName)
+
+	for {
+		select {
+		case rec := <-r._records:
+			if rec.Error != nil {
+				r.log.Errorf("ebpf watcher error: %v", rec.Error)
+				continue
+			}
+
+			switch rec.Event.Type {
+			case ebpfevents.EventTypeFileCreate, ebpfevents.EventTypeFileRename, ebpfevents.EventTypeFileDelete, ebpfevents.EventTypeFileModify:
+			default:
+				r.log.Warnf("received unwanted ebpf event: %s", rec.Event.Type.String())
+				continue
+			}
+
+			start := time.Now()
+			e, ok := NewEventFromEbpfEvent(
+				*rec.Event,
+				r.config.MaxFileSizeBytes,
+				r.config.HashTypes,
+				r.parsers,
+				r.excludedPath,
+			)
+			if !ok {
+				continue
+			}
+			e.rtt = time.Since(start)
+
+			r.log.Debugw("received ebpf event", "file_path", e.Path)
+			r.eventC <- e
+		case <-r.done:
+			r.log.Debug("ebpf watcher terminated")
+			return
+		}
+	}
+}
+
+func (r *ebpfReader) excludedPath(path string) bool {
+	dir, err := filepath.Abs(filepath.Dir(path))
+	if err != nil {
+		r.log.Errorf("ebpf watcher error: resolve abs path %q: %v", path, err)
+		return true
+	}
+
+	if r.config.IsExcludedPath(dir) {
+		return true
+	}
+
+	if !r.config.Recursive {
+		if _, ok := r.paths[dir]; ok {
+			return false
+		}
+	} else {
+		for p := range r.paths {
+			if strings.HasPrefix(dir, p) {
+				return false
+			}
+		}
+	}
+
+	return true
+}
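One reviewer-level note on `excludedPath`: the recursive branch compares directories with a bare `strings.HasPrefix`, which also matches sibling paths that merely share a name prefix (e.g. `/data2` against a watched `/data`). A small sketch of a separator-aware check, should that edge case matter here (`underRoot` is an illustrative name, not code from this PR):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// underRoot reports whether dir is root itself or nested below it,
// without treating "/data2" as being inside "/data".
func underRoot(dir, root string) bool {
	if dir == root {
		return true
	}
	return strings.HasPrefix(dir, root+string(filepath.Separator))
}

func main() {
	fmt.Println(underRoot("/data/sub", "/data"))      // true
	fmt.Println(underRoot("/data2", "/data"))         // false: sibling, not child
	fmt.Println(strings.HasPrefix("/data2", "/data")) // true: the pitfall
}
```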
diff --git a/auditbeat/module/file_integrity/eventreader_fsevents.go b/auditbeat/module/file_integrity/eventreader_fsevents.go
index 8a5844b3eea1..035b2bf90b41 100644
--- a/auditbeat/module/file_integrity/eventreader_fsevents.go
+++ b/auditbeat/module/file_integrity/eventreader_fsevents.go
@@ -31,7 +31,7 @@ import (
 	"github.com/elastic/elastic-agent-libs/logp"
 )
 
-type fsreader struct {
+type fsEventsReader struct {
 	stream      *fsevents.EventStream
 	config      Config
 	eventC      chan Event
@@ -89,7 +89,7 @@ var flagNames = map[fsevents.EventFlags]string{
 }
 
 // NewEventReader creates a new EventProducer backed by FSEvents macOS facility.
-func NewEventReader(c Config) (EventProducer, error) {
+func NewEventReader(c Config, logger *logp.Logger) (EventProducer, error) {
 	stream := &fsevents.EventStream{
 		Paths: c.Paths,
 		// NoDefer: Ignore Latency field and send events as fast as possible.
@@ -108,28 +108,27 @@ func NewEventReader(c Config) (EventProducer, error) {
 		stream.Flags |= fsevents.IgnoreSelf
 	}
 
-	log := logp.NewLogger(moduleName)
 	var dirs []os.FileInfo
 	if !c.Recursive {
 		for _, path := range c.Paths {
 			if info, err := getFileInfo(path); err == nil {
 				dirs = append(dirs, info)
 			} else {
-				log.Warnw("Failed to get file info", "file_path", path, "error", err)
+				logger.Warnw("Failed to get file info", "file_path", path, "error", err)
 			}
 		}
 	}
-	return &fsreader{
+	return &fsEventsReader{
 		stream:      stream,
 		config:      c,
 		eventC:      make(chan Event, 1),
 		watchedDirs: dirs,
-		log:         log,
+		log:         logger,
 		parsers:     FileParsers(c),
 	}, nil
 }
 
-func (r *fsreader) Start(done <-chan struct{}) (<-chan Event, error) {
+func (r *fsEventsReader) Start(done <-chan struct{}) (<-chan Event, error) {
 	r.stream.Start()
 	go r.consumeEvents(done)
 	r.log.Infow("Started FSEvents watcher",
@@ -138,7 +137,7 @@ func (r *fsreader) Start(done <-chan struct{}) (<-chan Event, error) {
 	return r.eventC, nil
 }
 
-func (r *fsreader) consumeEvents(done <-chan struct{}) {
+func (r *fsEventsReader) consumeEvents(done <-chan struct{}) {
 	defer close(r.eventC)
 	defer r.stream.Stop()
 
@@ -209,7 +208,7 @@ func getFileInfo(path string) (os.FileInfo, error) {
 	return info, fmt.Errorf("failed to stat: %w", err)
 }
 
-func (r *fsreader) isWatched(path string) bool {
+func (r *fsEventsReader) isWatched(path string) bool {
 	if r.config.Recursive {
 		return true
 	}
diff --git a/auditbeat/module/file_integrity/eventreader_fsnotify.go b/auditbeat/module/file_integrity/eventreader_fsnotify.go
index b49bb7b7905e..0420d0f8f814 100644
--- a/auditbeat/module/file_integrity/eventreader_fsnotify.go
+++ b/auditbeat/module/file_integrity/eventreader_fsnotify.go
@@ -32,7 +32,7 @@ import (
 	"github.com/elastic/elastic-agent-libs/logp"
 )
 
-type reader struct {
+type fsNotifyReader struct {
 	watcher monitor.Watcher
 	config  Config
 	eventC  chan Event
@@ -41,16 +41,7 @@ type reader struct {
 	parsers []FileParser
 }
 
-// NewEventReader creates a new EventProducer backed by fsnotify.
-func NewEventReader(c Config) (EventProducer, error) {
-	return &reader{
-		config:  c,
-		log:     logp.NewLogger(moduleName),
-		parsers: FileParsers(c),
-	}, nil
-}
-
-func (r *reader) Start(done <-chan struct{}) (<-chan Event, error) {
+func (r *fsNotifyReader) Start(done <-chan struct{}) (<-chan Event, error) {
 	watcher, err := monitor.New(r.config.Recursive, r.config.IsExcludedPath)
 	if err != nil {
 		return nil, err
@@ -105,17 +96,18 @@ func (r *reader) Start(done <-chan struct{}) (<-chan Event, error) {
 	return r.eventC, nil
 }
 
-func (r *reader) enqueueEvents(done <-chan struct{}) (events []*Event) {
+func (r *fsNotifyReader) enqueueEvents(done <-chan struct{}) []*Event {
+	events := make([]*Event, 0)
 	for {
 		ev := r.nextEvent(done)
 		if ev == nil {
-			return
+			return events
 		}
 		events = append(events, ev)
 	}
 }
 
-func (r *reader) consumeEvents(done <-chan struct{}) {
+func (r *fsNotifyReader) consumeEvents(done <-chan struct{}) {
 	defer close(r.eventC)
 	defer r.watcher.Close()
 
@@ -129,7 +121,7 @@ func (r *reader) consumeEvents(done <-chan struct{}) {
 	}
 }
 
-func (r *reader) nextEvent(done <-chan struct{}) *Event {
+func (r *fsNotifyReader) nextEvent(done <-chan struct{}) *Event {
 	for {
 		select {
 		case <-done:
diff --git a/auditbeat/module/file_integrity/eventreader_linux.go b/auditbeat/module/file_integrity/eventreader_linux.go
new file mode 100644
index 000000000000..9365ff551b3e
--- /dev/null
+++ b/auditbeat/module/file_integrity/eventreader_linux.go
@@ -0,0 +1,60 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build linux
+
+package file_integrity
+
+import (
+	"errors"
+
+	"github.com/elastic/elastic-agent-libs/logp"
+)
+
+func NewEventReader(c Config, logger *logp.Logger) (EventProducer, error) {
+	if c.Backend == BackendAuto || c.Backend == BackendFSNotify || c.Backend == "" {
+		// Auto and unset defaults to fsnotify
+		l := logger.Named("fsnotify")
+		l.Info("selected backend: fsnotify")
+		return &fsNotifyReader{
+			config:  c,
+			log:     l,
+			parsers: FileParsers(c),
+		}, nil
+	}
+
+	if c.Backend == BackendEBPF {
+		l := logger.Named("ebpf")
+		l.Info("selected backend: ebpf")
+
+		paths := make(map[string]struct{})
+		for _, p := range c.Paths {
+			paths[p] = struct{}{}
+		}
+
+		return &ebpfReader{
+			config:  c,
+			log:     l,
+			parsers: FileParsers(c),
+			paths:   paths,
+			eventC:  make(chan Event),
+		}, nil
+	}
+
+	// unimplemented
+	return nil, errors.ErrUnsupported
+}
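To summarize the dispatch above for reviewers: empty, `auto`, and `fsnotify` all yield the fsnotify reader, `ebpf` yields the eBPF reader, and anything else (currently `kprobes`) falls through to `errors.ErrUnsupported`. A compressed sketch of the same decision table (reader names stand in for the real structs; not code from this PR):

```go
package main

import (
	"errors"
	"fmt"
)

type Backend string

// selectReader mirrors NewEventReader's linux dispatch: the backend name
// decides the reader; kprobes is not implemented yet.
func selectReader(b Backend) (string, error) {
	switch b {
	case "", "auto", "fsnotify":
		return "fsNotifyReader", nil
	case "ebpf":
		return "ebpfReader", nil
	default:
		return "", errors.ErrUnsupported
	}
}

func main() {
	for _, b := range []Backend{"", "auto", "fsnotify", "ebpf", "kprobes"} {
		r, err := selectReader(b)
		fmt.Printf("%-10q -> %s %v\n", b, r, err)
	}
}
```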
diff --git a/auditbeat/module/file_integrity/eventreader_other.go b/auditbeat/module/file_integrity/eventreader_other.go
new file mode 100644
index 000000000000..e9027a8b47d1
--- /dev/null
+++ b/auditbeat/module/file_integrity/eventreader_other.go
@@ -0,0 +1,32 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build freebsd || openbsd || netbsd || windows
+
+package file_integrity
+
+import (
+	"github.com/elastic/elastic-agent-libs/logp"
+)
+
+func NewEventReader(c Config, logger *logp.Logger) (EventProducer, error) {
+	return &fsNotifyReader{
+		config:  c,
+		log:     logger.Named("fsnotify"),
+		parsers: FileParsers(c),
+	}, nil
+}
diff --git a/auditbeat/module/file_integrity/eventreader_test.go b/auditbeat/module/file_integrity/eventreader_test.go
index 5ed273b76b45..d34f59f08c2b 100644
--- a/auditbeat/module/file_integrity/eventreader_test.go
+++ b/auditbeat/module/file_integrity/eventreader_test.go
@@ -31,6 +31,8 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+
+	"github.com/elastic/elastic-agent-libs/logp"
 )
 
 func init() {
@@ -49,7 +51,7 @@ func TestEventReader(t *testing.T) {
 	// Create a new EventProducer.
 	config := defaultConfig
 	config.Paths = []string{dir}
-	r, err := NewEventReader(config)
+	r, err := NewEventReader(config, logp.NewLogger(""))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -251,7 +253,7 @@ func TestRaces(t *testing.T) {
 	config := defaultConfig
 	config.Paths = dirs
 	config.Recursive = true
-	r, err := NewEventReader(config)
+	r, err := NewEventReader(config, logp.NewLogger(""))
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/auditbeat/module/file_integrity/eventreader_unsupported.go b/auditbeat/module/file_integrity/eventreader_unsupported.go
index 79ab1f4245e0..d039cc1bfe06 100644
--- a/auditbeat/module/file_integrity/eventreader_unsupported.go
+++ b/auditbeat/module/file_integrity/eventreader_unsupported.go
@@ -19,8 +19,12 @@
 
 package file_integrity
 
-import "errors"
+import (
+	"errors"
 
-func NewEventReader(c Config) (EventProducer, error) {
+	"github.com/elastic/elastic-agent-libs/logp"
+)
+
+func NewEventReader(c Config, logger *logp.Logger) (EventProducer, error) {
 	return nil, errors.New("file auditing metricset is not implemented on this system")
 }
diff --git a/auditbeat/module/file_integrity/fileinfo_posix.go b/auditbeat/module/file_integrity/fileinfo_posix.go
index f70a638bc65a..d87c8fc4e20e 100644
--- a/auditbeat/module/file_integrity/fileinfo_posix.go
+++ b/auditbeat/module/file_integrity/fileinfo_posix.go
@@ -69,18 +69,7 @@ func NewMetadata(path string, info os.FileInfo) (*Metadata, error) {
 		fileInfo.Owner = owner.Username
 	}
 
-	var selinux []byte
-	getExtendedAttributes(path, map[string]*[]byte{
-		"security.selinux":        &selinux,
-		"system.posix_acl_access": &fileInfo.POSIXACLAccess,
-	})
-	// The selinux attr may be null terminated. It would be cheaper
-	// to use strings.TrimRight, but absent documentation saying
-	// that there is only ever a final null terminator, take the
-	// guaranteed correct path of terminating at the first found
-	// null byte.
-	selinux, _, _ = bytes.Cut(selinux, []byte{0})
-	fileInfo.SELinux = string(selinux)
+	fillExtendedAttributes(fileInfo, path)
 
 	group, err := user.LookupGroupId(strconv.Itoa(int(fileInfo.GID)))
 	if err != nil {
@@ -91,9 +80,25 @@ func NewMetadata(path string, info os.FileInfo) (*Metadata, error) {
 	if fileInfo.Origin, err = GetFileOrigin(path); err != nil {
 		errs = append(errs, err)
 	}
+
 	return fileInfo, errs.Err()
 }
 
+func fillExtendedAttributes(md *Metadata, path string) {
+	var selinux []byte
+	getExtendedAttributes(path, map[string]*[]byte{
+		"security.selinux":        &selinux,
+		"system.posix_acl_access": &md.POSIXACLAccess,
+	})
+	// The selinux attr may be null terminated. It would be cheaper
+	// to use strings.TrimRight, but absent documentation saying
+	// that there is only ever a final null terminator, take the
+	// guaranteed correct path of terminating at the first found
+	// null byte.
+	selinux, _, _ = bytes.Cut(selinux, []byte{0})
+	md.SELinux = string(selinux)
+}
+
 func getExtendedAttributes(path string, dst map[string]*[]byte) {
 	f, err := os.Open(path)
 	if err != nil {
diff --git a/auditbeat/module/file_integrity/flatbuffers.go b/auditbeat/module/file_integrity/flatbuffers.go
index 837d39cf2262..f380e42252c3 100644
--- a/auditbeat/module/file_integrity/flatbuffers.go
+++ b/auditbeat/module/file_integrity/flatbuffers.go
@@ -164,6 +164,14 @@ func fbWriteMetadata(b *flatbuffers.Builder, m *Metadata) flatbuffers.UOffsetT {
 		schema.MetadataAddType(b, schema.TypeDir)
 	case SymlinkType:
 		schema.MetadataAddType(b, schema.TypeSymlink)
+	case CharDeviceType:
+		schema.MetadataAddType(b, schema.TypeCharDevice)
+	case BlockDeviceType:
+		schema.MetadataAddType(b, schema.TypeBlockDevice)
+	case FIFOType:
+		schema.MetadataAddType(b, schema.TypeFIFO)
+	case SocketType:
+		schema.MetadataAddType(b, schema.TypeSocket)
 	}
 	if selinuxOffset > 0 {
 		schema.MetadataAddSelinux(b, selinuxOffset)
@@ -191,10 +199,12 @@ func fbWriteEvent(b *flatbuffers.Builder, e *Event) flatbuffers.UOffsetT {
 	schema.EventAddTimestampNs(b, e.Timestamp.UnixNano())
 
 	switch e.Source {
-	case SourceFSNotify:
-		schema.EventAddSource(b, schema.SourceFSNotify)
 	case SourceScan:
 		schema.EventAddSource(b, schema.SourceScan)
+	case SourceFSNotify:
+		schema.EventAddSource(b, schema.SourceFSNotify)
+	case SourceEBPF:
+		schema.EventAddSource(b, schema.SourceEBPF)
 	}
 
 	if targetPathOffset > 0 {
@@ -235,6 +245,8 @@ func fbDecodeEvent(path string, buf []byte) *Event {
 		rtn.Source = SourceScan
 	case schema.SourceFSNotify:
 		rtn.Source = SourceFSNotify
+	case schema.SourceEBPF:
+		rtn.Source = SourceEBPF
 	}
 
 	action := e.Action()
@@ -285,6 +297,14 @@ func fbDecodeMetadata(e *schema.Event) *Metadata {
 		rtn.Type = DirType
 	case schema.TypeSymlink:
 		rtn.Type = SymlinkType
+	case schema.TypeCharDevice:
+		rtn.Type = CharDeviceType
+	case schema.TypeBlockDevice:
+		rtn.Type = BlockDeviceType
+	case schema.TypeFIFO:
+		rtn.Type = FIFOType
+	case schema.TypeSocket:
+		rtn.Type = SocketType
 	default:
 		rtn.Type = UnknownType
 	}
diff --git a/auditbeat/module/file_integrity/metricset.go b/auditbeat/module/file_integrity/metricset.go
index 2c9c38d2d564..1183d3c86e2b 100644
--- a/auditbeat/module/file_integrity/metricset.go
+++ b/auditbeat/module/file_integrity/metricset.go
@@ -71,10 +71,10 @@ type MetricSet struct {
 	log *logp.Logger
 
 	// Runtime params that are initialized on Run().
-	bucket       datastore.BoltBucket
-	scanStart    time.Time
-	scanChan     <-chan Event
-	fsnotifyChan <-chan Event
+	bucket    datastore.BoltBucket
+	scanStart time.Time
+	scanChan  <-chan Event
+	eventChan <-chan Event
 
 	// Used when a hash can't be calculated
 	nullHashes map[HashType]Digest
@@ -87,7 +87,9 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
 		return nil, err
 	}
 
-	r, err := NewEventReader(config)
+	logger := logp.NewLogger(moduleName)
+
+	r, err := NewEventReader(config, logger)
 	if err != nil {
 		return nil, fmt.Errorf("failed to initialize file event reader: %w", err)
 	}
@@ -96,7 +98,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
 		BaseMetricSet: base,
 		config:        config,
 		reader:        r,
-		log:           logp.NewLogger(moduleName),
+		log:           logger,
 	}
 
 	ms.nullHashes = make(map[HashType]Digest, len(config.HashTypes))
@@ -118,11 +120,11 @@ func (ms *MetricSet) Run(reporter mb.PushReporterV2) {
 		return
 	}
 
-	for ms.fsnotifyChan != nil || ms.scanChan != nil {
+	for ms.eventChan != nil || ms.scanChan != nil {
 		select {
-		case event, ok := <-ms.fsnotifyChan:
+		case event, ok := <-ms.eventChan:
 			if !ok {
-				ms.fsnotifyChan = nil
+				ms.eventChan = nil
 				continue
 			}
 
@@ -161,9 +163,9 @@ func (ms *MetricSet) init(reporter mb.PushReporterV2) bool {
 	}
 	ms.bucket = bucket.(datastore.BoltBucket)
 
-	ms.fsnotifyChan, err = ms.reader.Start(reporter.Done())
+	ms.eventChan, err = ms.reader.Start(reporter.Done())
 	if err != nil {
-		err = fmt.Errorf("failed to start fsnotify event producer: %w", err)
+		err = fmt.Errorf("failed to start event producer: %w", err)
 		reporter.Error(err)
 		ms.log.Errorw("Failed to initialize", "error", err)
 		return false
diff --git a/auditbeat/module/file_integrity/monitor/monitor.go b/auditbeat/module/file_integrity/monitor/monitor.go
index 107a690d9754..ae80d1a17dc7 100644
--- a/auditbeat/module/file_integrity/monitor/monitor.go
+++ b/auditbeat/module/file_integrity/monitor/monitor.go
@@ -37,7 +37,7 @@ type Watcher interface {
 
 // New creates a new Watcher backed by fsnotify with optional recursive
 // logic.
-func New(recursive bool, IsExcludedPath func(path string) bool) (Watcher, error) {
+func New(recursive bool, isExcludedPath func(path string) bool) (Watcher, error) {
 	watcher, err := fsnotify.NewWatcher()
 	if err != nil {
 		return nil, err
@@ -45,7 +45,7 @@ func New(recursive bool, IsExcludedPath func(path string) bool) (Watcher, error)
 	// Use our simulated recursive watches unless the fsnotify implementation
 	// supports OS-provided recursive watches
 	if recursive && watcher.SetRecursive() != nil {
-		return newRecursiveWatcher(watcher, IsExcludedPath), nil //nolint:nilerr // Ignore SetRecursive() errors.
+		return newRecursiveWatcher(watcher, isExcludedPath), nil //nolint:nilerr // Ignore SetRecursive() errors.
 	}
 	return (*nonRecursiveWatcher)(watcher), nil
 }
diff --git a/auditbeat/module/file_integrity/monitor/recursive.go b/auditbeat/module/file_integrity/monitor/recursive.go
index 80ab3e742ef3..7a0768d6fcbd 100644
--- a/auditbeat/module/file_integrity/monitor/recursive.go
+++ b/auditbeat/module/file_integrity/monitor/recursive.go
@@ -40,7 +40,7 @@ type recursiveWatcher struct {
 	isExcludedPath func(path string) bool
 }
 
-func newRecursiveWatcher(inner *fsnotify.Watcher, IsExcludedPath func(path string) bool) *recursiveWatcher {
+func newRecursiveWatcher(inner *fsnotify.Watcher, isExcludedPath func(path string) bool) *recursiveWatcher {
 	return &recursiveWatcher{
 		inner: inner,
 		tree:  FileTree{},
@@ -48,7 +48,7 @@ func newRecursiveWatcher(inner *fsnotify.Watcher, IsExcludedPath func(path strin
 		addC:   make(chan string),
 		addErrC: make(chan error),
 		log:     logp.NewLogger(moduleName),
-		isExcludedPath: IsExcludedPath,
+		isExcludedPath: isExcludedPath,
 	}
 }
diff --git a/auditbeat/module/file_integrity/schema.fbs b/auditbeat/module/file_integrity/schema.fbs
index 9e0863f6379a..583497a7522b 100644
--- a/auditbeat/module/file_integrity/schema.fbs
+++ b/auditbeat/module/file_integrity/schema.fbs
@@ -12,6 +12,7 @@ enum Action : ubyte (bit_flags) {
 enum Source : ubyte {
   Scan,
   FSNotify,
+  eBPF,
 }
 
 enum Type : ubyte {
@@ -19,6 +20,10 @@ enum Type : ubyte {
   File,
   Dir,
   Symlink,
+  CharDevice,
+  BlockDevice,
+  FIFO,
+  Socket,
 }
 
 table Metadata {
diff --git a/auditbeat/module/file_integrity/schema/Source.go b/auditbeat/module/file_integrity/schema/Source.go
index 94730ce29572..17f0b83e6ebc 100644
--- a/auditbeat/module/file_integrity/schema/Source.go
+++ b/auditbeat/module/file_integrity/schema/Source.go
@@ -26,16 +26,19 @@ type Source byte
 const (
 	SourceScan     Source = 0
 	SourceFSNotify Source = 1
+	SourceEBPF     Source = 2
 )
 
 var EnumNamesSource = map[Source]string{
 	SourceScan:     "Scan",
 	SourceFSNotify: "FSNotify",
+	SourceEBPF:     "eBPF",
 }
 
 var EnumValuesSource = map[string]Source{
 	"Scan":     SourceScan,
 	"FSNotify": SourceFSNotify,
+	"eBPF":     SourceEBPF,
 }
 
 func (v Source) String() string {
diff --git a/auditbeat/module/file_integrity/schema/Type.go b/auditbeat/module/file_integrity/schema/Type.go
index 2025ee3b0968..a0dc4e7a416e 100644
--- a/auditbeat/module/file_integrity/schema/Type.go
+++ b/auditbeat/module/file_integrity/schema/Type.go
@@ -24,24 +24,36 @@ import "strconv"
 type Type byte
 
 const (
-	TypeUnknown Type = 0
-	TypeFile    Type = 1
-	TypeDir     Type = 2
-	TypeSymlink Type = 3
+	TypeUnknown     Type = 0
+	TypeFile        Type = 1
+	TypeDir         Type = 2
+	TypeSymlink     Type = 3
+	TypeCharDevice  Type = 4
+	TypeBlockDevice Type = 5
+	TypeFIFO        Type = 6
+	TypeSocket      Type = 7
 )
 
 var EnumNamesType = map[Type]string{
-	TypeUnknown: "Unknown",
-	TypeFile:    "File",
-	TypeDir:     "Dir",
-	TypeSymlink: "Symlink",
+	TypeUnknown:     "Unknown",
+	TypeFile:        "File",
+	TypeDir:         "Dir",
+	TypeSymlink:     "Symlink",
+	TypeCharDevice:  "CharDevice",
+	TypeBlockDevice: "BlockDevice",
+	TypeFIFO:        "FIFO",
+	TypeSocket:      "Socket",
 }
 
 var EnumValuesType = map[string]Type{
-	"Unknown": TypeUnknown,
-	"File":    TypeFile,
-	"Dir":     TypeDir,
-	"Symlink": TypeSymlink,
+	"Unknown":     TypeUnknown,
+	"File":        TypeFile,
+	"Dir":         TypeDir,
+	"Symlink":     TypeSymlink,
+	"CharDevice":  TypeCharDevice,
+	"BlockDevice": TypeBlockDevice,
+	"FIFO":        TypeFIFO,
+	"Socket":      TypeSocket,
 }
 
 func (v Type) String() string {
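Since these flatbuffers enums are hand-maintained in Go, the paired name/value maps in `schema/Source.go` and `schema/Type.go` can silently drift when entries like `eBPF` or the new file types are added. A throwaway consistency check of the kind that could guard this (not part of the PR; values copied from `schema/Type.go` above):

```go
package main

import "fmt"

type Type byte

// Values copied from schema/Type.go in this diff.
const (
	TypeUnknown     Type = 0
	TypeFile        Type = 1
	TypeDir         Type = 2
	TypeSymlink     Type = 3
	TypeCharDevice  Type = 4
	TypeBlockDevice Type = 5
	TypeFIFO        Type = 6
	TypeSocket      Type = 7
)

var enumNamesType = map[Type]string{
	TypeUnknown: "Unknown", TypeFile: "File", TypeDir: "Dir", TypeSymlink: "Symlink",
	TypeCharDevice: "CharDevice", TypeBlockDevice: "BlockDevice", TypeFIFO: "FIFO", TypeSocket: "Socket",
}

var enumValuesType = map[string]Type{
	"Unknown": TypeUnknown, "File": TypeFile, "Dir": TypeDir, "Symlink": TypeSymlink,
	"CharDevice": TypeCharDevice, "BlockDevice": TypeBlockDevice, "FIFO": TypeFIFO, "Socket": TypeSocket,
}

func main() {
	// Every name must round-trip back to its value, and vice versa.
	for v, name := range enumNamesType {
		if enumValuesType[name] != v {
			fmt.Printf("drift: %s -> %d, want %d\n", name, enumValuesType[name], v)
		}
	}
	fmt.Println("checked", len(enumNamesType), "entries")
}
```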
diff --git a/auditbeat/tests/system/test_file_integrity.py b/auditbeat/tests/system/test_file_integrity.py
index 280d2916a550..7d05d144217d 100644
--- a/auditbeat/tests/system/test_file_integrity.py
+++ b/auditbeat/tests/system/test_file_integrity.py
@@ -1,9 +1,16 @@
+import os
 import time
 import unittest
 import platform
 from auditbeat import *


+def is_root():
+    if 'geteuid' not in dir(os):
+        return False
+    return os.geteuid() == 0
+
+
 # Escapes a path to match what's printed in the logs
 def escape_path(path):
     return path.replace('\\', '\\\\')
@@ -49,7 +56,6 @@ def wrap_except(expr):


 class Test(BaseTest):
-
     def wait_output(self, min_events):
         self.wait_until(lambda: wrap_except(lambda: len(self.read_output()) >= min_events))
         # wait for the number of lines in the file to stay constant for a second
@@ -62,9 +68,17 @@ def wait_output(self, min_events):
             else:
                 break

-    @unittest.skipIf(os.getenv("CI") is not None and platform.system() == 'Darwin',
-                     'Flaky test: https://github.com/elastic/beats/issues/24678')
-    def test_non_recursive(self):
+    def wait_startup(self, backend, dir):
+        if backend == "ebpf":
+            self.wait_log_contains("started ebpf watcher", max_timeout=30, ignore_case=True)
+        else:
+            # wait until the directories to watch are printed in the logs
+            # this happens when the file_integrity module starts.
+            # Case must be ignored under windows as capitalisation of paths
+            # may differ
+            self.wait_log_contains(escape_path(dir), max_timeout=30, ignore_case=True)
+
+    def _test_non_recursive(self, backend):
         """
         file_integrity monitors watched directories (non recursive).
         """
@@ -73,22 +87,21 @@ def test_non_recursive(self):
                 self.temp_dir("auditbeat_test")]

         with PathCleanup(dirs):
+            extras = {
+                "paths": dirs,
+                "scan_at_start": False
+            }
+            if platform.system() == "Linux":
+                extras["backend"] = backend
+
             self.render_config_template(
                 modules=[{
                     "name": "file_integrity",
-                    "extras": {
-                        "paths": dirs,
-                        "scan_at_start": False
-                    }
+                    "extras": extras
                 }],
             )
             proc = self.start_beat()
-
-            # wait until the directories to watch are printed in the logs
-            # this happens when the file_integrity module starts.
-            # Case must be ignored under windows as capitalisation of paths
-            # may differ
-            self.wait_log_contains(escape_path(dirs[0]), max_timeout=30, ignore_case=True)
+            self.wait_startup(backend, dirs[0])

             file1 = os.path.join(dirs[0], 'file.txt')
             self.create_file(file1, "hello world!")
@@ -109,10 +122,12 @@ def test_non_recursive(self):
             # log entries are JSON formatted, this value shows up as an escaped json string.
             self.wait_log_contains("\\\"deleted\\\"")

-            self.wait_log_contains("\"path\":\"{0}\"".format(escape_path(subdir)), ignore_case=True)
-            self.wait_output(3)
-            self.wait_until(lambda: any(
-                'file.path' in obj and obj['file.path'].lower() == subdir.lower() for obj in self.read_output()))
+
+            if backend == "fsnotify":
+                self.wait_output(4)
+            else:
+                # ebpf backend doesn't catch directory creation
+                self.wait_output(3)

             proc.check_kill_and_wait()
             self.assert_no_logged_warnings()
@@ -126,7 +141,8 @@ def test_non_recursive(self):

             has_file(objs, file1, "430ce34d020724ed75a196dfc2ad67c77772d169")
             has_file(objs, file2, "d23be250530a24be33069572db67995f21244c51")
-            has_dir(objs, subdir)
+            if backend == "fsnotify":
+                has_dir(objs, subdir)

             file_events(objs, file1, ['created', 'deleted'])
             file_events(objs, file2, ['created'])
@@ -134,8 +150,16 @@ def test_non_recursive(self):
             # assert file inside subdir is not reported
             assert self.log_contains(file3) is False

-    @unittest.skipIf(os.getenv("BUILD_ID") is not None, "Skipped as flaky: https://github.com/elastic/beats/issues/7731")
-    def test_recursive(self):
+    @unittest.skipIf(os.getenv("CI") is not None and platform.system() == 'Darwin',
+                     'Flaky test: https://github.com/elastic/beats/issues/24678')
+    def test_non_recursive__fsnotify(self):
+        self._test_non_recursive("fsnotify")
+
+    @unittest.skipUnless(is_root(), "Requires root")
+    def test_non_recursive__ebpf(self):
+        self._test_non_recursive("ebpf")
+
+    def _test_recursive(self, backend):
         """
         file_integrity monitors watched directories (recursive).
         """
@@ -143,22 +167,22 @@ def test_recursive(self):
         dirs = [self.temp_dir("auditbeat_test")]

         with PathCleanup(dirs):
+            extras = {
+                "paths": dirs,
+                "scan_at_start": False,
+                "recursive": True
+            }
+            if platform.system() == "Linux":
+                extras["backend"] = backend
+
             self.render_config_template(
                 modules=[{
                     "name": "file_integrity",
-                    "extras": {
-                        "paths": dirs,
-                        "scan_at_start": False,
-                        "recursive": True
-                    }
+                    "extras": extras
                 }],
             )
             proc = self.start_beat()
-
-            # wait until the directories to watch are printed in the logs
-            # this happens when the file_integrity module starts
-            self.wait_log_contains(escape_path(dirs[0]), max_timeout=30, ignore_case=True)
-            self.wait_log_contains("\"recursive\":true")
+            self.wait_startup(backend, dirs[0])

             # auditbeat_test/subdir/
             subdir = os.path.join(dirs[0], "subdir")
@@ -174,10 +198,13 @@ def test_recursive(self):
             file2 = os.path.join(subdir2, "more.txt")
             self.create_file(file2, "")

-            self.wait_log_contains("\"path\":\"{0}\"".format(escape_path(file2)), ignore_case=True)
-            self.wait_output(4)
-            self.wait_until(lambda: any(
-                'file.path' in obj and obj['file.path'].lower() == subdir2.lower() for obj in self.read_output()))
+            if backend == "fsnotify":
+                self.wait_output(4)
+                self.wait_until(lambda: any(
+                    'file.path' in obj and obj['file.path'].lower() == subdir2.lower() for obj in self.read_output()))
+            else:
+                # ebpf backend doesn't catch directory creation
+                self.wait_output(2)

             proc.check_kill_and_wait()
             self.assert_no_logged_warnings()
@@ -191,8 +218,82 @@ def test_recursive(self):

             has_file(objs, file1, "430ce34d020724ed75a196dfc2ad67c77772d169")
             has_file(objs, file2, "da39a3ee5e6b4b0d3255bfef95601890afd80709")
-            has_dir(objs, subdir)
-            has_dir(objs, subdir2)
+            if backend == "fsnotify":
+                has_dir(objs, subdir)
+                has_dir(objs, subdir2)

             file_events(objs, file1, ['created'])
             file_events(objs, file2, ['created'])
+
+    def test_recursive__fsnotify(self):
+        self._test_recursive("fsnotify")
+
+    @unittest.skipUnless(is_root(), "Requires root")
"Requires root") + def test_recursive__ebpf(self): + self._test_recursive("ebpf") + + @unittest.skipIf(platform.system() != 'Linux', 'Non linux, skipping.') + def _test_file_modified(self, backend): + """ + file_integrity tests for file modifications (chmod, chown, write, truncate, xattrs). + """ + + dirs = [self.temp_dir("auditbeat_test")] + + with PathCleanup(dirs): + self.render_config_template( + modules=[{ + "name": "file_integrity", + "extras": { + "paths": dirs, + "scan_at_start": False, + "recursive": False, + "backend": backend + } + }], + ) + proc = self.start_beat() + self.wait_startup(backend, dirs[0]) + + # Event 1: file create + f = os.path.join(dirs[0], f'file_{backend}.txt') + self.create_file(f, "hello world!") + + # FSNotify can't catch the events if operations happens too fast + time.sleep(1) + + # Event 2: chmod + os.chmod(f, 0o777) + # FSNotify can't catch the events if operations happens too fast + time.sleep(1) + + with open(f, "w") as fd: + # Event 3: write + fd.write("data") + # FSNotify can't catch the events if operations happens too fast + time.sleep(1) + + # Event 4: truncate + fd.truncate(0) + # FSNotify can't catch the events if operations happens too fast + time.sleep(1) + + # Wait N events + self.wait_output(4) + + proc.check_kill_and_wait() + self.assert_no_logged_warnings() + + # Ensure all Beater stages are used. + assert self.log_contains("Setup Beat: auditbeat") + assert self.log_contains("auditbeat start running") + assert self.log_contains("auditbeat stopped") + + @unittest.skipIf(platform.system() != 'Linux', 'Non linux, skipping.') + def test_file_modified__fsnotify(self): + self._test_file_modified("fsnotify") + + @unittest.skipIf(platform.system() != 'Linux', 'Non linux, skipping.') + @unittest.skipUnless(is_root(), "Requires root") + def test_file_modified__ebpf(self): + self._test_file_modified("ebpf") diff --git a/catalog-info.yaml b/catalog-info.yaml index fb0395d20277..00b22b0d7f13 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -219,9 +219,9 @@ spec: name: heartbeat description: "Heartbeat pipeline" spec: - # branch_configuration: "main 7.* 8.* v7.* v8.*" TODO: temporarily commented to build PRs from forks + branch_configuration: "main 7.17 8.*" pipeline_file: ".buildkite/heartbeat/heartbeat-pipeline.yml" - # maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline is ready + maximum_timeout_in_minutes: 120 provider_settings: build_pull_request_forks: false build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot @@ -234,8 +234,8 @@ spec: cancel_intermediate_builds_branch_filter: "!main !7.* !8.*" skip_intermediate_builds: true skip_intermediate_builds_branch_filter: "!main !7.* !8.*" - # env: - # ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is ready + env: + ELASTIC_PR_COMMENTS_ENABLED: "true" teams: ingest-fp: access_level: MANAGE_BUILD_AND_READ @@ -309,7 +309,7 @@ spec: name: beats-libbeat description: "Beats libbeat pipeline" spec: - branch_configuration: "main 7.17 8.* + branch_configuration: "main 7.17 8.*" pipeline_file: ".buildkite/libbeat/pipeline.libbeat.yml" maximum_timeout_in_minutes: 120 provider_settings: @@ -321,7 +321,7 @@ spec: build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null) repository: elastic/beats cancel_intermediate_builds: true - cancel_intermediate_builds_branch_filter: "!main !7.* !8.*" + cancel_intermediate_builds_branch_filter: "!main !7.17 !8.*" 
diff --git a/catalog-info.yaml b/catalog-info.yaml
index fb0395d20277..00b22b0d7f13 100644
--- a/catalog-info.yaml
+++ b/catalog-info.yaml
@@ -219,9 +219,9 @@ spec:
       name: heartbeat
       description: "Heartbeat pipeline"
     spec:
-      # branch_configuration: "main 7.* 8.* v7.* v8.*" TODO: temporarily commented to build PRs from forks
+      branch_configuration: "main 7.17 8.*"
       pipeline_file: ".buildkite/heartbeat/heartbeat-pipeline.yml"
-      # maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline is ready
+      maximum_timeout_in_minutes: 120
       provider_settings:
        build_pull_request_forks: false
        build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot
@@ -234,8 +234,8 @@ spec:
       cancel_intermediate_builds_branch_filter: "!main !7.* !8.*"
       skip_intermediate_builds: true
       skip_intermediate_builds_branch_filter: "!main !7.* !8.*"
-      # env:
-      #   ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is ready
+      env:
+        ELASTIC_PR_COMMENTS_ENABLED: "true"
       teams:
         ingest-fp:
           access_level: MANAGE_BUILD_AND_READ
@@ -309,7 +309,7 @@ spec:
       name: beats-libbeat
       description: "Beats libbeat pipeline"
     spec:
-      branch_configuration: "main 7.17 8.*
+      branch_configuration: "main 7.17 8.*"
       pipeline_file: ".buildkite/libbeat/pipeline.libbeat.yml"
       maximum_timeout_in_minutes: 120
       provider_settings:
@@ -321,7 +321,7 @@ spec:
           build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null)
       repository: elastic/beats
       cancel_intermediate_builds: true
-      cancel_intermediate_builds_branch_filter: "!main !7.* !8.*"
+      cancel_intermediate_builds_branch_filter: "!main !7.17 !8.*"
       skip_intermediate_builds: true
       skip_intermediate_builds_branch_filter: "!main !7.17 !8.*"
       env:
@@ -354,9 +354,9 @@ spec:
       name: beats-packetbeat
       description: "Beats packetbeat pipeline"
     spec:
-#      branch_configuration: "main 7.17 8.* v7.17 v8.*" TODO: temporarily commented to build PRs from forks
+      branch_configuration: "main 7.17 8.*"
       pipeline_file: ".buildkite/packetbeat/pipeline.packetbeat.yml"
-#      maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline is ready
+      maximum_timeout_in_minutes: 120
       provider_settings:
        build_pull_request_forks: false
        build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot
@@ -369,8 +369,8 @@ spec:
       cancel_intermediate_builds_branch_filter: "!main !7.17 !8.*"
       skip_intermediate_builds: true
       skip_intermediate_builds_branch_filter: "!main !7.17 !8.*"
-      # env:
-      #   ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is ready
+      env:
+        ELASTIC_PR_COMMENTS_ENABLED: "true"
       teams:
         ingest-fp:
           access_level: MANAGE_BUILD_AND_READ
@@ -443,9 +443,9 @@ spec:
       name: beats-winlogbeat
       description: "Beats winlogbeat pipeline"
     spec:
-#      branch_configuration: "main 7.17 8.*" TODO: temporarily commented to build PRs from forks
+      branch_configuration: "main 7.17 8.*"
       pipeline_file: ".buildkite/winlogbeat/pipeline.winlogbeat.yml"
-#      maximum_timeout_in_minutes: 120 TODO: uncomment when pipeline is ready
+      maximum_timeout_in_minutes: 120
       provider_settings:
        build_pull_request_forks: false
        build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot
@@ -458,8 +458,8 @@ spec:
       cancel_intermediate_builds_branch_filter: "!main !7.17 !8.*"
       skip_intermediate_builds: true
       skip_intermediate_builds_branch_filter: "!main !7.17 !8.*"
-      # env:
-      #   ELASTIC_PR_COMMENTS_ENABLED: "true" TODO: uncomment when pipeline is ready
+      env:
+        ELASTIC_PR_COMMENTS_ENABLED: "true"
       teams:
         ingest-fp:
           access_level: MANAGE_BUILD_AND_READ
diff --git a/go.mod b/go.mod
index ee391fb43d20..362cc23dca1e 100644
--- a/go.mod
+++ b/go.mod
@@ -200,7 +200,7 @@ require (
 	github.com/aws/smithy-go v1.13.5
 	github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5
 	github.com/elastic/bayeux v1.0.5
-	github.com/elastic/ebpfevents v0.3.2
+	github.com/elastic/ebpfevents v0.4.0
 	github.com/elastic/elastic-agent-autodiscover v0.6.7
 	github.com/elastic/elastic-agent-libs v0.7.5
 	github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3
diff --git a/go.sum b/go.sum
index 52051e7f4cf6..9457811b68d6 100644
--- a/go.sum
+++ b/go.sum
@@ -659,8 +659,8 @@ github.com/elastic/bayeux v1.0.5 h1:UceFq01ipmT3S8DzFK+uVAkbCdiPR0Bqei8qIGmUeY0=
 github.com/elastic/bayeux v1.0.5/go.mod h1:CSI4iP7qeo5MMlkznGvYKftp8M7qqP/3nzmVZoXHY68=
 github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3 h1:lnDkqiRFKm0rxdljqrj3lotWinO9+jFmeDXIC4gvIQs=
 github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3/go.mod h1:aPqzac6AYkipvp4hufTyMj5PDIphF3+At8zr7r51xjY=
-github.com/elastic/ebpfevents v0.3.2 h1:UJ8kW5jw2TpUR5MEMaZ1O62sK9JQ+5xTlj+YpQC6BXc=
-github.com/elastic/ebpfevents v0.3.2/go.mod h1:o21z5xup/9dK8u0Hg9bZRflSqqj1Zu5h2dg2hSTcUPQ=
+github.com/elastic/ebpfevents v0.4.0 h1:M80eAeJnzvGQgU9cjJqkjFca9pjM3aq/TuZxJeom4bI=
+github.com/elastic/ebpfevents v0.4.0/go.mod h1:o21z5xup/9dK8u0Hg9bZRflSqqj1Zu5h2dg2hSTcUPQ=
 github.com/elastic/elastic-agent-autodiscover v0.6.7 h1:+KVjltN0rPsBrU8b156gV4lOTBgG/vt0efFCFARrf3g=
 github.com/elastic/elastic-agent-autodiscover v0.6.7/go.mod h1:hFeFqneS2r4jD0/QzGkrNk0YVdN0JGh7lCWdsH7zcI4=
 github.com/elastic/elastic-agent-client/v7 v7.8.0 h1:GHFzDJIWpdgI0qDk5EcqbQJGvwTsl2E2vQK3/xe+MYQ=
diff --git a/x-pack/auditbeat/auditbeat.reference.yml b/x-pack/auditbeat/auditbeat.reference.yml
index 45d1c4af8510..3ef4ac77eabd 100644
--- a/x-pack/auditbeat/auditbeat.reference.yml
+++ b/x-pack/auditbeat/auditbeat.reference.yml
@@ -92,6 +92,11 @@ auditbeat.modules:
   # Auditbeat will ignore files unless they match a pattern.
   #include_files:
   #- '/\.ssh($|/)'

+  # Select the backend which will be used to source events.
+  # "fsnotify" doesn't have the ability to associate user data with file events.
+  # Valid values: auto, fsnotify, kprobes, ebpf.
+  # Default: fsnotify.
+  backend: fsnotify
   # Scan over the configured file paths at startup and send events for new or
   # modified files since the last time Auditbeat was running.
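The reference config above only documents the option values and the fsnotify default. As a hedged sketch of how validation and `auto` resolution *could* look, the snippet below validates the four documented values and resolves `auto` by privilege, mirroring the `@unittest.skipUnless(is_root(), ...)` gates in the system tests; the names and the resolution policy (eBPF for root on Linux, fsnotify otherwise) are assumptions for illustration, not the module's actual implementation:

```go
// Hypothetical validation/resolution for the file_integrity `backend` option.
package main

import (
	"fmt"
	"os"
	"runtime"
)

type Backend string

const (
	BackendAuto     Backend = "auto"
	BackendFSNotify Backend = "fsnotify"
	BackendKprobes  Backend = "kprobes"
	BackendEBPF     Backend = "ebpf"
)

// validate accepts only the values documented in the reference config.
func (b Backend) validate() error {
	switch b {
	case BackendAuto, BackendFSNotify, BackendKprobes, BackendEBPF:
		return nil
	}
	return fmt.Errorf("invalid file_integrity backend %q", b)
}

// resolve picks a concrete backend for "auto". Kernel-level backends need
// root; this policy is an assumption, not taken from the diff.
func resolve(b Backend) Backend {
	if b != BackendAuto {
		return b
	}
	if runtime.GOOS == "linux" && os.Geteuid() == 0 {
		return BackendEBPF
	}
	return BackendFSNotify
}

func main() {
	b := Backend("auto")
	if err := b.validate(); err != nil {
		panic(err)
	}
	fmt.Println("selected backend:", resolve(b))
}
```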