diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index faa8c6b58d2..4a02b3c9a1d 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -198,7 +198,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - The environment variable `BEATS_ADD_CLOUD_METADATA_PROVIDERS` overrides configured/default `add_cloud_metadata` providers {pull}38669[38669] - Introduce log message for not supported annotations for Hints based autodiscover {pull}38213[38213] - Add persistent volume claim name to volume if available {pull}38839[38839] - +- Raw events are now logged to a different file, this prevents potentially sensitive information from leaking into log files {pull}38767[38767] *Auditbeat* diff --git a/auditbeat/auditbeat.reference.yml b/auditbeat/auditbeat.reference.yml index e9a23ca6ac0..8f684d78d03 100644 --- a/auditbeat/auditbeat.reference.yml +++ b/auditbeat/auditbeat.reference.yml @@ -1549,6 +1549,54 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. + +# logging.event_data: +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. 
+#logging.event_data.to_files: true +#logging.event_data: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/auditbeat + + # The name of the files where the logs are written to. + #name: auditbeat-event-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to false. + # rotateonstartup: false + # ============================= X-Pack Monitoring ============================== # Auditbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index bc5ebdc3d15..ff308012ed1 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -2640,6 +2640,54 @@ logging.files: # file. Defaults to true. 
# rotateonstartup: true +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. + +# logging.event_data: +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +#logging.event_data.to_files: true +#logging.event_data: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/filebeat + + # The name of the files where the logs are written to. + #name: filebeat-event-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. 
All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to false. + # rotateonstartup: false + # ============================= X-Pack Monitoring ============================== # Filebeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/filebeat/tests/integration/event_log_file_test.go b/filebeat/tests/integration/event_log_file_test.go new file mode 100644 index 00000000000..5b2758b4018 --- /dev/null +++ b/filebeat/tests/integration/event_log_file_test.go @@ -0,0 +1,136 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build integration + +package integration + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/libbeat/tests/integration" +) + +var eventsLogFileCfg = ` +filebeat.inputs: + - type: filestream + id: filestream-input-id + enabled: true + parsers: + - ndjson: + target: "" + overwrite_keys: true + expand_keys: true + add_error_key: true + ignore_decoding_error: false + paths: + - %s + +output: + elasticsearch: + hosts: + - localhost:9200 + protocol: http + username: admin + password: testing + +logging: + level: info + event_data: + files: + name: filebeat-my-event-log +` + +func TestEventsLoggerESOutput(t *testing.T) { + // First things first, ensure ES is running and we can connect to it. + // If ES is not running, the test will timeout and the only way to know + // what caused it is going through Filebeat's logs. + integration.EnsureESIsRunning(t) + + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) + + logFilePath := filepath.Join(filebeat.TempDir(), "log.log") + filebeat.WriteConfigFile(fmt.Sprintf(eventsLogFileCfg, logFilePath)) + + logFile, err := os.Create(logFilePath) + if err != nil { + t.Fatalf("could not create file '%s': %s", logFilePath, err) + } + + _, _ = logFile.WriteString(` +{"message":"foo bar","int":10,"string":"str"} +{"message":"another message","int":20,"string":"str2"} +{"message":"index failure","int":"not a number","string":10} +{"message":"second index failure","int":"not a number","string":10} +`) + if err := logFile.Sync(); err != nil { + t.Fatalf("could not sync log file '%s': %s", logFilePath, err) + } + if err := logFile.Close(); err != nil { + t.Fatalf("could not close log file '%s': %s", logFilePath, err) + } + + filebeat.Start() + + // Wait for a log entry that indicates an entry in the events + // logger file. 
+ msg := "Cannot index event (status=400)" + require.Eventually(t, func() bool { + return filebeat.LogContains(msg) + }, time.Minute, 100*time.Millisecond, + fmt.Sprintf("String '%s' not found on Filebeat logs", msg)) + + // The glob here matches the configured value for the filename + glob := filepath.Join(filebeat.TempDir(), "filebeat-my-event-log*.ndjson") + files, err := filepath.Glob(glob) + if err != nil { + t.Fatalf("could not read files matching glob '%s': %s", glob, err) + } + if len(files) != 1 { + t.Fatalf("there must be only one file matching the glob '%s', found: %s", glob, files) + } + + eventsLogFile := files[0] + data, err := os.ReadFile(eventsLogFile) + if err != nil { + t.Fatalf("could not read '%s': %s", eventsLogFile, err) + } + + strData := string(data) + eventMsg := "not a number" + if !strings.Contains(strData, eventMsg) { + t.Errorf("expecting to find '%s' on '%s'", eventMsg, eventsLogFile) + t.Errorf("Contents:\n%s", strData) + t.FailNow() + } + + // Ensure the normal log file does not contain the event data + if filebeat.LogContains(eventMsg) { + t.Fatalf("normal log file must NOT contain event data, '%s' found in the logs", eventMsg) + } +} diff --git a/filebeat/tests/system/test_reload_inputs.py b/filebeat/tests/system/test_reload_inputs.py index 36c5b2eeedd..53644837c2c 100644 --- a/filebeat/tests/system/test_reload_inputs.py +++ b/filebeat/tests/system/test_reload_inputs.py @@ -105,9 +105,9 @@ def test_start_stop(self): self.wait_until(lambda: self.output_lines() == 1) - # Remove input - with open(self.working_dir + "/configs/input.yml", 'w') as f: - f.write("") + # Remove input by moving the file + # we keep it around to help debugging + os.rename(self.working_dir + "/configs/input.yml", self.working_dir + "/configs/input.yml.disabled") # Wait until input is stopped self.wait_until( @@ -152,8 +152,9 @@ def test_start_stop_replace(self): self.wait_until(lambda: self.output_lines() == 1) # Remove input - with open(self.working_dir + 
"/configs/input.yml", 'w') as f: - f.write("") + # Remove input by moving the file + # we keep it around to help debugging + os.rename(self.working_dir + "/configs/input.yml", self.working_dir + "/configs/input.yml.disabled") # Wait until input is stopped self.wait_until( diff --git a/filebeat/tests/system/test_reload_modules.py b/filebeat/tests/system/test_reload_modules.py index 5b8e08f49f4..4d0b530acd3 100644 --- a/filebeat/tests/system/test_reload_modules.py +++ b/filebeat/tests/system/test_reload_modules.py @@ -144,9 +144,9 @@ def test_start_stop(self): self.wait_until(lambda: self.output_lines() == 1, max_timeout=10) print(self.output_lines()) - # Remove input - with open(self.working_dir + "/configs/system.yml", 'w') as f: - f.write("") + # Remove input by moving the file + # we keep it around to help debugging + os.rename(self.working_dir + "/configs/system.yml", self.working_dir + "/configs/system.yml.disabled") # Wait until input is stopped self.wait_until( diff --git a/heartbeat/heartbeat.reference.yml b/heartbeat/heartbeat.reference.yml index 7407d213748..3632ce12bbd 100644 --- a/heartbeat/heartbeat.reference.yml +++ b/heartbeat/heartbeat.reference.yml @@ -1636,6 +1636,54 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. 
+# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. + +# logging.event_data: +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +#logging.event_data.to_files: true +#logging.event_data: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/heartbeat + + # The name of the files where the logs are written to. + #name: heartbeat-event-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to false. + # rotateonstartup: false + # ============================= X-Pack Monitoring ============================== # Heartbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. 
The diff --git a/libbeat/_meta/config/logging.reference.yml.tmpl b/libbeat/_meta/config/logging.reference.yml.tmpl index 660bbb73a02..e0921d0ff82 100644 --- a/libbeat/_meta/config/logging.reference.yml.tmpl +++ b/libbeat/_meta/config/logging.reference.yml.tmpl @@ -67,3 +67,51 @@ logging.files: # Rotate existing logs on startup rather than appending them to the existing # file. Defaults to true. # rotateonstartup: true + +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. + +# logging.event_data: +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +#logging.event_data.to_files: true +#logging.event_data: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/{{.BeatName}} + + # The name of the files where the logs are written to. + #name: {{.BeatName}}-event-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. 
+ # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to false. + # rotateonstartup: false diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index 8fa3678e042..c3932c0b867 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -125,6 +125,7 @@ type beatConfig struct { BufferConfig *config.C `config:"http.buffer"` Path paths.Path `config:"path"` Logging *config.C `config:"logging"` + EventLogging *config.C `config:"logging.event_data"` MetricLogging *config.C `config:"logging.metrics"` Keystore *config.C `config:"keystore"` Instrumentation instrumentation.Config `config:"instrumentation"` @@ -808,7 +809,7 @@ func (b *Beat) configure(settings Settings) error { return fmt.Errorf("error setting timestamp precision: %w", err) } - if err := configure.Logging(b.Info.Beat, b.Config.Logging); err != nil { + if err := configure.LoggingWithTypedOutputs(b.Info.Beat, b.Config.Logging, b.Config.EventLogging, logp.TypeKey, logp.EventType); err != nil { return fmt.Errorf("error initializing logging: %w", err) } diff --git a/libbeat/docs/loggingconfig.asciidoc b/libbeat/docs/loggingconfig.asciidoc index 4ba73c1b60d..c9bb5344046 100644 --- a/libbeat/docs/loggingconfig.asciidoc +++ b/libbeat/docs/loggingconfig.asciidoc @@ -293,3 +293,79 @@ Below are some samples: `2017-12-17T18:54:16.242-0500 INFO [example] logp/core_test.go:16 some message` `2017-12-17T18:54:16.242-0500 INFO [example] logp/core_test.go:19 
some message {"x": 1}` + +ifndef::serverless[] +[float] +=== Configuration options for event_data logger + +Some outputs will log raw events on errors like indexing errors in the +Elasticsearch output, to prevent logging raw events (that may contain +sensitive information) together with other log messages, a different +log file, only for log entries containing raw events, is used. It will +use the same level, selectors and all other configurations from the +default logger, but it will have its own file configuration. + +Having a different log file for raw events also prevents event data +from drowning out the regular log files. + +IMPORTANT: No matter the default logger output configuration, raw events +will **always** be logged to a file configured by `logging.event_data.files`. + +[float] +==== `logging.event_data.files.path` + +The directory that log files are written to. The default is the logs path. See +the <> section for details. + +[float] +==== `logging.event_data.files.name` + +The name of the file that logs are written to. The default is '{beatname_lc}'-event-data. + +[float] +==== `logging.event_data.files.rotateeverybytes` + +The maximum size of a log file. If the limit is reached, a new log file is +generated. The default size limit is 5242880 (5 MB). + +[float] +==== `logging.event_data.files.keepfiles` + +The number of most recent rotated log files to keep on disk. Older files are +deleted during log rotation. The default value is 2. The `keepfiles` option has +to be in the range of 2 to 1024 files. + +[float] +==== `logging.event_data.files.permissions` + +The permissions mask to apply when rotating log files. The default value is +0600. The `permissions` option must be a valid Unix-style file permissions mask +expressed in octal notation. In Go, numbers in octal notation must start with +'0'. + +The most permissive mask allowed is 0640. If a higher permissions mask is +specified via this setting, it will be subject to an umask of 0027. 
+ +This option is not supported on Windows. + +Examples: + +* 0640: give read and write access to the file owner, and read access to members of the group associated with the file. +* 0600: give read and write access to the file owner, and no access to all others. + +[float] +==== `logging.event_data.files.interval` + +Enable log file rotation on time intervals in addition to size-based rotation. +Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h +are boundary-aligned with minutes, hours, days, weeks, months, and years as +reported by the local system clock. All other intervals are calculated from the +unix epoch. Defaults to disabled. + +[float] +==== `logging.event_data.files.rotateonstartup` + +If the log file already exists on startup, immediately rotate it and start +writing to a new file instead of appending to the existing one. Defaults to +false. +endif::serverless[] diff --git a/libbeat/outputs/elasticsearch/client.go b/libbeat/outputs/elasticsearch/client.go index 504aac710af..0892ce40173 100644 --- a/libbeat/outputs/elasticsearch/client.go +++ b/libbeat/outputs/elasticsearch/client.go @@ -417,17 +417,18 @@ func (client *Client) bulkCollectPublishFails(result eslegclient.BulkResult, dat encodedEvent := data[i].EncodedEvent.(*encodedEvent) if encodedEvent.deadLetter { stats.nonIndexable++ - client.log.Errorf("Can't deliver to dead letter index event (status=%v). Enable debug logs to view the event and cause.", status) - client.log.Debugf("Can't deliver to dead letter index event %#v (status=%v): %s", data[i], status, msg) + client.log.Errorf("Can't deliver to dead letter index event (status=%v). Look at the event log to view the event and cause.", status) + client.log.Errorw(fmt.Sprintf("Can't deliver to dead letter index event %#v (status=%v): %s", data[i], status, msg), logp.TypeKey, logp.EventType) // poison pill - this will clog the pipeline if the underlying failure is non transient. 
} else if client.deadLetterIndex != "" { - client.log.Warnf("Cannot index event (status=%v), trying dead letter index. Enable debug logs to view the event and cause.", status) - client.log.Debugf("Cannot index event %#v (status=%v): %s, trying dead letter index", data[i], status, msg) + client.log.Warnf("Cannot index event (status=%v), trying dead letter index. Look at the event log to view the event and cause.", status) + client.log.Warnw(fmt.Sprintf("Cannot index event %#v (status=%v): %s, trying dead letter index", data[i], status, msg), logp.TypeKey, logp.EventType) client.setDeadLetter(encodedEvent, status, string(msg)) + } else { // drop stats.nonIndexable++ - client.log.Warnf("Cannot index event (status=%v): dropping event! Enable debug logs to view the event and cause.", status) - client.log.Debugf("Cannot index event %#v (status=%v): %s, dropping event!", data[i], status, msg) + client.log.Warnf("Cannot index event (status=%v): dropping event! Look at the event log to view the event and cause.", status) + client.log.Warnw(fmt.Sprintf("Cannot index event %#v (status=%v): %s, dropping event!", data[i], status, msg), logp.TypeKey, logp.EventType) continue } } diff --git a/libbeat/outputs/fileout/file.go b/libbeat/outputs/fileout/file.go index d14bd99d69a..87b50f62c1a 100644 --- a/libbeat/outputs/fileout/file.go +++ b/libbeat/outputs/fileout/file.go @@ -19,6 +19,7 @@ package fileout import ( "context" + "fmt" "os" "path/filepath" "time" @@ -132,7 +133,8 @@ func (out *fileOutput) Publish(_ context.Context, batch publisher.Batch) error { } else { out.log.Warnf("Failed to serialize the event: %+v", err) } - out.log.Debugf("Failed event: %v", event) + out.log.Debug("Failed event logged to event log file") + out.log.Debugw(fmt.Sprintf("Failed event: %v", event), logp.TypeKey, logp.EventType) dropped++ continue diff --git a/libbeat/outputs/kafka/client.go b/libbeat/outputs/kafka/client.go index 24bbc61145d..afeb02a5534 100644 --- a/libbeat/outputs/kafka/client.go 
+++ b/libbeat/outputs/kafka/client.go @@ -214,21 +214,22 @@ func (c *client) getEventMessage(data *publisher.Event) (*message, error) { if msg.topic == "" { topic, err := c.topic.Select(event) if err != nil { - return nil, fmt.Errorf("setting kafka topic failed with %v", err) + return nil, fmt.Errorf("setting kafka topic failed with %w", err) } if topic == "" { return nil, errNoTopicsSelected } msg.topic = topic if _, err := data.Cache.Put("topic", topic); err != nil { - return nil, fmt.Errorf("setting kafka topic in publisher event failed: %v", err) + return nil, fmt.Errorf("setting kafka topic in publisher event failed: %w", err) } } serializedEvent, err := c.codec.Encode(c.index, event) if err != nil { if c.log.IsDebug() { - c.log.Debugf("failed event: %v", event) + c.log.Debug("failed event logged to event log file") + c.log.Debugw(fmt.Sprintf("failed event: %v", event), logp.TypeKey, logp.EventType) } return nil, err } @@ -270,7 +271,7 @@ func (c *client) errorWorker(ch <-chan *sarama.ProducerError) { msg := errMsg.Msg.Metadata.(*message) msg.ref.fail(msg, errMsg.Err) - if errMsg.Err == breaker.ErrBreakerOpen { + if errors.Is(errMsg.Err, breaker.ErrBreakerOpen) { // ErrBreakerOpen is a very special case in Sarama. 
It happens only when // there have been repeated critical (broker / topic-level) errors, and it // puts Sarama into a state where it immediately rejects all input @@ -356,18 +357,18 @@ func (r *msgRef) done() { } func (r *msgRef) fail(msg *message, err error) { - switch err { - case sarama.ErrInvalidMessage: + switch { + case errors.Is(err, sarama.ErrInvalidMessage): r.client.log.Errorf("Kafka (topic=%v): dropping invalid message", msg.topic) r.client.observer.Dropped(1) - case sarama.ErrMessageSizeTooLarge, sarama.ErrInvalidMessageSize: + case errors.Is(err, sarama.ErrMessageSizeTooLarge) || errors.Is(err, sarama.ErrInvalidMessageSize): r.client.log.Errorf("Kafka (topic=%v): dropping too large message of size %v.", msg.topic, len(msg.key)+len(msg.value)) r.client.observer.Dropped(1) - case breaker.ErrBreakerOpen: + case errors.Is(err, breaker.ErrBreakerOpen): // Add this message to the failed list, but don't overwrite r.err since // all the breaker error means is "there were a lot of other errors". r.failed = append(r.failed, msg.data) @@ -411,7 +412,7 @@ func (r *msgRef) dec() { } func (c *client) Test(d testing.Driver) { - if c.config.Net.TLS.Enable == true { + if c.config.Net.TLS.Enable { d.Warn("TLS", "Kafka output doesn't support TLS testing") } diff --git a/libbeat/outputs/redis/client.go b/libbeat/outputs/redis/client.go index 5a299749aac..1fcd46e6f64 100644 --- a/libbeat/outputs/redis/client.go +++ b/libbeat/outputs/redis/client.go @@ -20,6 +20,7 @@ package redis import ( "context" "errors" + "fmt" "regexp" "strconv" "strings" @@ -317,10 +318,11 @@ func serializeEvents( succeeded := data for _, d := range data { + d := d serializedEvent, err := codec.Encode(index, &d.Content) if err != nil { - log.Errorf("Encoding event failed with error: %+v", err) - log.Debugf("Failed event: %v", d.Content) + log.Errorf("Encoding event failed with error: %+v. 
Look at the event log file to view the event", err) + log.Errorw(fmt.Sprintf("Failed event: %v", d.Content), logp.TypeKey, logp.EventType) goto failLoop } @@ -335,10 +337,11 @@ failLoop: succeeded = data[:i] rest := data[i+1:] for _, d := range rest { + d := d serializedEvent, err := codec.Encode(index, &d.Content) if err != nil { - log.Errorf("Encoding event failed with error: %+v", err) - log.Debugf("Failed event: %v", d.Content) + log.Errorf("Encoding event failed with error: %+v. Look at the event log file to view the event", err) + log.Errorw(fmt.Sprintf("Failed event: %v", d.Content), logp.TypeKey, logp.EventType) i++ continue } diff --git a/libbeat/processors/actions/append.go b/libbeat/processors/actions/append.go index 1bf2caad45f..fd15eeb2e58 100644 --- a/libbeat/processors/actions/append.go +++ b/libbeat/processors/actions/append.go @@ -21,7 +21,6 @@ import ( "fmt" "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/management" "github.com/elastic/beats/v7/libbeat/processors" "github.com/elastic/beats/v7/libbeat/processors/checks" jsprocessor "github.com/elastic/beats/v7/libbeat/processors/script/javascript/module/processor" @@ -82,9 +81,8 @@ func (f *appendProcessor) Run(event *beat.Event) (*beat.Event, error) { err := f.appendValues(f.config.TargetField, f.config.Fields, f.config.Values, event) if err != nil { errMsg := fmt.Errorf("failed to append fields in append processor: %w", err) - if management.TraceLevelEnabled() { - f.logger.Debug(errMsg.Error()) - } + f.logger.Debugw(errMsg.Error(), logp.TypeKey, logp.EventType) + if f.config.FailOnError { event = backup if _, err := event.PutValue("error.message", errMsg.Error()); err != nil { diff --git a/libbeat/processors/actions/copy_fields.go b/libbeat/processors/actions/copy_fields.go index 0f4fab309a3..f0d6cbe775a 100644 --- a/libbeat/processors/actions/copy_fields.go +++ b/libbeat/processors/actions/copy_fields.go @@ -22,7 +22,6 @@ import ( "fmt" 
"github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/management" "github.com/elastic/beats/v7/libbeat/processors" "github.com/elastic/beats/v7/libbeat/processors/checks" jsprocessor "github.com/elastic/beats/v7/libbeat/processors/script/javascript/module/processor" @@ -79,9 +78,8 @@ func (f *copyFields) Run(event *beat.Event) (*beat.Event, error) { err := f.copyField(field.From, field.To, event) if err != nil { errMsg := fmt.Errorf("Failed to copy fields in copy_fields processor: %w", err) - if management.TraceLevelEnabled() { - f.logger.Debug(errMsg.Error()) - } + f.logger.Debugw(errMsg.Error(), logp.TypeKey, logp.EventType) + if f.config.FailOnError { event = backup _, _ = event.PutValue("error.message", errMsg.Error()) diff --git a/libbeat/processors/actions/decode_base64_field.go b/libbeat/processors/actions/decode_base64_field.go index c45166beb11..3ec5e0a8d7a 100644 --- a/libbeat/processors/actions/decode_base64_field.go +++ b/libbeat/processors/actions/decode_base64_field.go @@ -24,7 +24,6 @@ import ( "strings" "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/management" "github.com/elastic/beats/v7/libbeat/processors" "github.com/elastic/beats/v7/libbeat/processors/checks" jsprocessor "github.com/elastic/beats/v7/libbeat/processors/script/javascript/module/processor" @@ -84,9 +83,8 @@ func (f *decodeBase64Field) Run(event *beat.Event) (*beat.Event, error) { err := f.decodeField(event) if err != nil { errMsg := fmt.Errorf("failed to decode base64 fields in processor: %w", err) - if management.TraceLevelEnabled() { - f.log.Debug(errMsg.Error()) - } + f.log.Debugw(errMsg.Error(), logp.TypeKey, logp.EventType) + if f.config.FailOnError { event = backup _, _ = event.PutValue("error.message", errMsg.Error()) diff --git a/libbeat/processors/actions/decompress_gzip_field.go b/libbeat/processors/actions/decompress_gzip_field.go index 8d463600c21..993dbf3e821 100644 --- 
a/libbeat/processors/actions/decompress_gzip_field.go +++ b/libbeat/processors/actions/decompress_gzip_field.go @@ -25,7 +25,6 @@ import ( "io" "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/management" "github.com/elastic/beats/v7/libbeat/processors" "github.com/elastic/beats/v7/libbeat/processors/checks" conf "github.com/elastic/elastic-agent-libs/config" @@ -76,9 +75,8 @@ func (f *decompressGzipField) Run(event *beat.Event) (*beat.Event, error) { err := f.decompressGzipField(event) if err != nil { errMsg := fmt.Errorf("Failed to decompress field in decompress_gzip_field processor: %w", err) - if management.TraceLevelEnabled() { - f.log.Debug(errMsg.Error()) - } + f.log.Debugw(errMsg.Error(), logp.TypeKey, logp.EventType) + if f.config.FailOnError { event = backup _, _ = event.PutValue("error.message", errMsg.Error()) diff --git a/libbeat/processors/actions/rename.go b/libbeat/processors/actions/rename.go index 4c49174bf54..7503127e103 100644 --- a/libbeat/processors/actions/rename.go +++ b/libbeat/processors/actions/rename.go @@ -22,7 +22,6 @@ import ( "fmt" "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/management" "github.com/elastic/beats/v7/libbeat/processors" "github.com/elastic/beats/v7/libbeat/processors/checks" jsprocessor "github.com/elastic/beats/v7/libbeat/processors/script/javascript/module/processor" @@ -84,9 +83,8 @@ func (f *renameFields) Run(event *beat.Event) (*beat.Event, error) { err := f.renameField(field.From, field.To, event) if err != nil { errMsg := fmt.Errorf("Failed to rename fields in processor: %w", err) - if management.TraceLevelEnabled() { - f.logger.Debug(errMsg.Error()) - } + f.logger.Debugw(errMsg.Error(), logp.TypeKey, logp.EventType) + if f.config.FailOnError { event = backup _, _ = event.PutValue("error.message", errMsg.Error()) diff --git a/libbeat/processors/actions/replace.go b/libbeat/processors/actions/replace.go index df4aa03fc86..b242b9f3579 100644 
--- a/libbeat/processors/actions/replace.go +++ b/libbeat/processors/actions/replace.go @@ -23,7 +23,6 @@ import ( "regexp" "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/management" "github.com/elastic/beats/v7/libbeat/processors" "github.com/elastic/beats/v7/libbeat/processors/checks" jsprocessor "github.com/elastic/beats/v7/libbeat/processors/script/javascript/module/processor" @@ -86,9 +85,8 @@ func (f *replaceString) Run(event *beat.Event) (*beat.Event, error) { err := f.replaceField(field.Field, field.Pattern, field.Replacement, event) if err != nil { errMsg := fmt.Errorf("Failed to replace fields in processor: %w", err) - if management.TraceLevelEnabled() { - f.log.Debug(errMsg.Error()) - } + f.log.Debugw(errMsg.Error(), logp.TypeKey, logp.EventType) + if f.config.FailOnError { event = backup _, _ = event.PutValue("error.message", errMsg.Error()) diff --git a/libbeat/processors/urldecode/urldecode.go b/libbeat/processors/urldecode/urldecode.go index 59ed552e2ae..c9aac0cdef1 100644 --- a/libbeat/processors/urldecode/urldecode.go +++ b/libbeat/processors/urldecode/urldecode.go @@ -23,7 +23,6 @@ import ( "net/url" "github.com/elastic/beats/v7/libbeat/beat" - "github.com/elastic/beats/v7/libbeat/management" "github.com/elastic/beats/v7/libbeat/processors" "github.com/elastic/beats/v7/libbeat/processors/checks" jsprocessor "github.com/elastic/beats/v7/libbeat/processors/script/javascript/module/processor" @@ -83,9 +82,8 @@ func (p *urlDecode) Run(event *beat.Event) (*beat.Event, error) { err := p.decodeField(field.From, field.To, event) if err != nil { errMsg := fmt.Errorf("failed to decode fields in urldecode processor: %w", err) - if management.TraceLevelEnabled() { - p.log.Debug(errMsg.Error()) - } + p.log.Debugw(errMsg.Error(), logp.TypeKey, logp.EventType) + if p.config.FailOnError { event = backup _, _ = event.PutValue("error.message", errMsg.Error()) diff --git a/libbeat/publisher/processing/processors.go 
b/libbeat/publisher/processing/processors.go index 69fb5090e4c..e90202401a7 100644 --- a/libbeat/publisher/processing/processors.go +++ b/libbeat/publisher/processing/processors.go @@ -27,7 +27,6 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" - "github.com/elastic/beats/v7/libbeat/management" "github.com/elastic/beats/v7/libbeat/outputs/codec/json" "github.com/elastic/beats/v7/libbeat/processors" "github.com/elastic/elastic-agent-libs/logp" @@ -200,18 +199,16 @@ func debugPrintProcessor(info beat.Info, log *logp.Logger) *processorFn { EscapeHTML: false, }) return newProcessor("debugPrint", func(event *beat.Event) (*beat.Event, error) { - if management.TraceLevelEnabled() { - mux.Lock() - defer mux.Unlock() + mux.Lock() + defer mux.Unlock() - b, err := encoder.Encode(info.Beat, event) - if err != nil { - //nolint:nilerr // encoder failure is not considered an error by this processor [why not?] - return event, nil - } - - log.Debugf("Publish event: %s", b) + b, err := encoder.Encode(info.Beat, event) + if err != nil { + //nolint:nilerr // encoder failure is not considered an error by this processor [why not?] + return event, nil } + + log.Debugw(fmt.Sprintf("Publish event: %s", b), logp.TypeKey, logp.EventType) return event, nil }) } diff --git a/libbeat/tests/integration/framework.go b/libbeat/tests/integration/framework.go index 229d855b9fa..cc305255709 100644 --- a/libbeat/tests/integration/framework.go +++ b/libbeat/tests/integration/framework.go @@ -53,6 +53,7 @@ type BeatProc struct { configFile string fullPath string logFileOffset int64 + eventLogFileOffset int64 t *testing.T tempDir string stdin io.WriteCloser @@ -283,13 +284,32 @@ func (b *BeatProc) Stop() { } // LogMatch tests each line of the logfile to see if contains any -// match of the provided regular expression. It will open the log -// file on every call, read until EOF, then close it. LogContains +// match of the provided regular expression. 
It will open the log +// file on every call, read until EOF, then close it. LogContains // will be faster so use that if possible. func (b *BeatProc) LogMatch(match string) bool { re := regexp.MustCompile(match) logFile := b.openLogFile() - _, err := logFile.Seek(b.logFileOffset, io.SeekStart) + defer logFile.Close() + + found := false + found, b.logFileOffset = b.logRegExpMatch(re, logFile, b.logFileOffset) + if found { + return found + } + + eventLogFile := b.openEventLogFile() + if eventLogFile == nil { + return false + } + defer eventLogFile.Close() + found, b.eventLogFileOffset = b.logRegExpMatch(re, eventLogFile, b.eventLogFileOffset) + + return found +} + +func (b *BeatProc) logRegExpMatch(re *regexp.Regexp, logFile *os.File, offset int64) (bool, int64) { + _, err := logFile.Seek(offset, io.SeekStart) if err != nil { b.t.Fatalf("could not set offset for '%s': %s", logFile.Name(), err) } @@ -306,7 +326,7 @@ func (b *BeatProc) LogMatch(match string) bool { for { data, err := r.ReadBytes('\n') line := string(data) - b.logFileOffset += int64(len(data)) + offset += int64(len(data)) if err != nil { if err != io.EOF { @@ -316,20 +336,49 @@ func (b *BeatProc) LogMatch(match string) bool { } if re.MatchString(line) { - return true + return true, offset } } - return false + return false, offset } // LogContains looks for `s` as a substring of every log line, // it will open the log file on every call, read it until EOF, -// then close it. +// then close it. It keeps track of the offset so subsequent calls +// will only read log entries that were not read by the previous +// call. +// +// The events log file is read after the normal log file and its +// offset is tracked separately. 
func (b *BeatProc) LogContains(s string) bool { - t := b.t logFile := b.openLogFile() - _, err := logFile.Seek(b.logFileOffset, io.SeekStart) + defer logFile.Close() + + found := false + found, b.logFileOffset = b.searchStrInLogs(logFile, s, b.logFileOffset) + if found { + return found + } + + eventLogFile := b.openEventLogFile() + if eventLogFile == nil { + return false + } + defer eventLogFile.Close() + found, b.eventLogFileOffset = b.searchStrInLogs(eventLogFile, s, b.eventLogFileOffset) + + return found +} + +// searchStrInLogs search for s as a substring of any line in logFile starting +// from offset. +// +// It will close logFile and return the current offset. +func (b *BeatProc) searchStrInLogs(logFile *os.File, s string, offset int64) (bool, int64) { + t := b.t + + _, err := logFile.Seek(offset, io.SeekStart) if err != nil { t.Fatalf("could not set offset for '%s': %s", logFile.Name(), err) } @@ -346,7 +395,7 @@ func (b *BeatProc) LogContains(s string) bool { for { data, err := r.ReadBytes('\n') line := string(data) - b.logFileOffset += int64(len(data)) + offset += int64(len(data)) if err != nil { if err != io.EOF { @@ -356,11 +405,11 @@ func (b *BeatProc) LogContains(s string) bool { } if strings.Contains(line, s) { - return true + return true, offset } } - return false + return false, offset } // WaitForLogs waits for the specified string s to be present in the logs within @@ -393,32 +442,36 @@ func (b *BeatProc) WriteConfigFile(cfg string) { b.baseArgs = append(b.baseArgs, "-c", b.configFile) } -// openLogFile opens the log file for reading and returns it. -// It also registers a cleanup function to close the file -// when the test ends. -func (b *BeatProc) openLogFile() *os.File { +// openGlobFile opens a file defined by glob. The glob must resolve to a single +// file otherwise the test fails. It returns a *os.File and a boolean indicating +// whether a file was found. 
+// +// If `waitForFile` is true, it will wait up to 5 seconds for the file to +// be created. The test will fail if the file is not found. If it is false +// and no file is found, nil and false are returned. +func (b *BeatProc) openGlobFile(glob string, waitForFile bool) *os.File { t := b.t - glob := fmt.Sprintf("%s-*.ndjson", filepath.Join(b.tempDir, b.beatName)) + files, err := filepath.Glob(glob) if err != nil { t.Fatalf("could not expand log file glob: %s", err) } - require.Eventually(t, func() bool { - files, err = filepath.Glob(glob) - if err != nil { - t.Fatalf("could not expand log file glob: %s", err) - } - return len(files) == 1 - }, 5*time.Second, 100*time.Millisecond, - "waiting for log file matching glob '%s' to be created", glob) + if waitForFile && len(files) == 0 { + require.Eventually(t, func() bool { + files, err = filepath.Glob(glob) + if err != nil { + t.Fatalf("could not expand log file glob: %s", err) + } + return len(files) == 1 + }, 5*time.Second, 100*time.Millisecond, + "waiting for log file matching glob '%s' to be created", glob) + } - // On a normal operation there must be a single log, if there are more - // than one, then there is an issue and the Beat is logging too much, - // which is enough to stop the test - if len(files) != 1 { - t.Fatalf("there must be only one log file for %s, found: %d", - glob, len(files)) + // We only reach this line if `waitForFile` is false, so we need + // to check whether we found a file + if len(files) == 0 { + return nil } f, err := os.Open(files[0]) @@ -429,6 +482,33 @@ func (b *BeatProc) openLogFile() *os.File { return f } +// openLogFile opens the log file for reading and returns it. +// It's the caller's responsibility to close the file. +// If the log file is not found, the test fails. The returned +// value is never nil. +func (b *BeatProc) openLogFile() *os.File { + // Beats can produce two different log files, to make sure we're + // reading the normal one we add the year to the glob. 
The default + // log file name looks like: filebeat-20240116.ndjson + year := time.Now().Year() + glob := fmt.Sprintf("%s-%d*.ndjson", filepath.Join(b.tempDir, b.beatName), year) + + return b.openGlobFile(glob, true) +} + +// openEventLogFile opens the log file for reading and returns it. +// If the events log file does not exist, nil is returned +// It's the caller's responsibility to close the file. +func (b *BeatProc) openEventLogFile() *os.File { + // Beats can produce two different log files, to make sure we're + // reading the normal one we add the year to the glob. The default + // log file name looks like: filebeat-20240116.ndjson + year := time.Now().Year() + glob := fmt.Sprintf("%s-events-data-%d*.ndjson", filepath.Join(b.tempDir, b.beatName), year) + + return b.openGlobFile(glob, false) +} + // createTempDir creates a temporary directory that will be // removed after the tests passes. // @@ -484,9 +564,9 @@ func EnsureESIsRunning(t *testing.T) { resp, err := http.DefaultClient.Do(req) if err != nil { // If you're reading this message, you probably forgot to start ES - // run `mage compose:Up` from Filebeat's folder to start all + // run `mage docker:composeUp` from Filebeat's folder to start all // containers required for integration tests - t.Fatalf("cannot execute HTTP request to ES: '%s', check to make sure ES is running (mage compose:Up)", err) + t.Fatalf("cannot execute HTTP request to ES: '%s', check to make sure ES is running (mage docker:composeUp)", err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { diff --git a/libbeat/tests/system/beat/beat.py b/libbeat/tests/system/beat/beat.py index b8c4b3c43f1..bc1126402cd 100644 --- a/libbeat/tests/system/beat/beat.py +++ b/libbeat/tests/system/beat/beat.py @@ -517,9 +517,10 @@ def log_contains_count(self, msg, logfile=None, ignore_case=False): if logfile is None: logfile = self.beat_name + "-" + self.today + ".ndjson" - print("logfile", logfile, self.working_dir) + logfile_path = 
os.path.join(self.working_dir, logfile) + print("logfile ", logfile_path) try: - with open(os.path.join(self.working_dir, logfile), "r", encoding="utf_8") as f: + with open(logfile_path, "r", encoding="utf_8") as f: for line in f: if is_regexp: if msg.search(line) is not None: @@ -529,6 +530,27 @@ def log_contains_count(self, msg, logfile=None, ignore_case=False): line = line.lower() if line.find(msg) >= 0: counter = counter + 1 + + # Event log file: + logfile = self.beat_name + "-events-data-" + self.today + ".ndjson" + logfile_path = os.path.join(self.working_dir, "logs", logfile) + print("event logfile", logfile_path) + try: + with open(logfile_path, "r", encoding="utf_8") as f: + for line in f: + if is_regexp: + if msg.search(line) is not None: + counter = counter + 1 + continue + if ignore_case: + line = line.lower() + if line.find(msg) >= 0: + counter = counter + 1 + except FileNotFoundError as e: + # The events log file is not always present, so we ignore + # if it does not exist + pass + except IOError as ioe: print(ioe) counter = -1 diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index 2538bef77d3..985e4795910 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -2421,6 +2421,54 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have its own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. 
+# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. + +# logging.event_data: +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +#logging.event_data.to_files: true +#logging.event_data: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/metricbeat + + # The name of the files where the logs are written to. + #name: metricbeat-event-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to false. + # rotateonstartup: false + # ============================= X-Pack Monitoring ============================== # Metricbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. 
The diff --git a/packetbeat/packetbeat.reference.yml b/packetbeat/packetbeat.reference.yml index 6eaee863da0..7041e79ea74 100644 --- a/packetbeat/packetbeat.reference.yml +++ b/packetbeat/packetbeat.reference.yml @@ -2015,6 +2015,54 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. + +# logging.event_data: +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +#logging.event_data.to_files: true +#logging.event_data: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/packetbeat + + # The name of the files where the logs are written to. + #name: packetbeat-event-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. 
+ #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to false. + # rotateonstartup: false + # ============================= X-Pack Monitoring ============================== # Packetbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/winlogbeat/winlogbeat.reference.yml b/winlogbeat/winlogbeat.reference.yml index a6a042f4aba..47a61d36faf 100644 --- a/winlogbeat/winlogbeat.reference.yml +++ b/winlogbeat/winlogbeat.reference.yml @@ -1426,6 +1426,54 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. + +# logging.event_data: +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. 
+#logging.event_data.to_files: true +#logging.event_data: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/winlogbeat + + # The name of the files where the logs are written to. + #name: winlogbeat-event-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to false. + # rotateonstartup: false + # ============================= X-Pack Monitoring ============================== # Winlogbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. 
The diff --git a/x-pack/agentbeat/agentbeat.spec.yml b/x-pack/agentbeat/agentbeat.spec.yml index 045188513b0..7e859cc9362 100644 --- a/x-pack/agentbeat/agentbeat.spec.yml +++ b/x-pack/agentbeat/agentbeat.spec.yml @@ -39,6 +39,10 @@ inputs: - "gc_percent=${AUDITBEAT_GOGC:100}" - "-E" - "auditbeat.config.modules.enabled=false" + - "-E" + - "logging.event_data.to_stderr=true" + - "-E" + - "logging.event_data.to_files=false" - name: audit/file_integrity description: "Audit File Integrity" platforms: *platforms @@ -77,6 +81,10 @@ inputs: - "gc_percent=${FILEBEAT_GOGC:100}" - "-E" - "filebeat.config.modules.enabled=false" + - "-E" + - "logging.event_data.to_stderr=true" + - "-E" + - "logging.event_data.to_files=false" - name: aws-s3 description: "AWS S3" platforms: *platforms @@ -261,6 +269,10 @@ inputs: - "logging.to_stderr=true" - "-E" - "gc_percent=${HEARTBEAT_GOGC:100}" + - "-E" + - "logging.event_data.to_stderr=true" + - "-E" + - "logging.event_data.to_files=false" - name: synthetics/http description: "Synthetics HTTP Monitor" platforms: *platforms @@ -304,6 +316,10 @@ inputs: - "gc_percent=${METRICBEAT_GOGC:100}" - "-E" - "metricbeat.config.modules.enabled=false" + - "-E" + - "logging.event_data.to_stderr=true" + - "-E" + - "logging.event_data.to_files=false" - name: docker/metrics description: "Docker metrics" platforms: *platforms @@ -540,6 +556,10 @@ inputs: - "logging.to_stderr=true" - "-E" - "gc_percent=${OSQUERYBEAT_GOGC:100}" + - "-E" + - "logging.event_data.to_stderr=true" + - "-E" + - "logging.event_data.to_files=false" - name: packet description: "Packet Capture" platforms: *platforms @@ -566,3 +586,7 @@ inputs: - "logging.to_stderr=true" - "-E" - "gc_percent=${PACKETBEAT_GOGC:100}" + - "-E" + - "logging.event_data.to_stderr=true" + - "-E" + - "logging.event_data.to_files=false" diff --git a/x-pack/auditbeat/auditbeat.reference.yml b/x-pack/auditbeat/auditbeat.reference.yml index a0352454e09..438ae307b80 100644 --- 
a/x-pack/auditbeat/auditbeat.reference.yml +++ b/x-pack/auditbeat/auditbeat.reference.yml @@ -1605,6 +1605,54 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. + +# logging.event_data: +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +#logging.event_data.to_files: true +#logging.event_data: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/auditbeat + + # The name of the files where the logs are written to. + #name: auditbeat-event-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. 
Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to false. + # rotateonstartup: false + # ============================= X-Pack Monitoring ============================== # Auditbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 0c7cab1acb1..9830e468e64 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -4567,6 +4567,54 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. + +# logging.event_data: +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +#logging.event_data.to_files: true +#logging.event_data: + # Configure the path where the logs are written. 
The default is the logs directory + # under the home path (the binary location). + #path: /var/log/filebeat + + # The name of the files where the logs are written to. + #name: filebeat-event-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to false. + # rotateonstartup: false + # ============================= X-Pack Monitoring ============================== # Filebeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/functionbeat/functionbeat.reference.yml b/x-pack/functionbeat/functionbeat.reference.yml index 2284fedbcce..a3df78c4e2d 100644 --- a/x-pack/functionbeat/functionbeat.reference.yml +++ b/x-pack/functionbeat/functionbeat.reference.yml @@ -1264,6 +1264,54 @@ logging.files: # file. Defaults to true. 
# rotateonstartup: true +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. + +# logging.event_data: +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +#logging.event_data.to_files: true +#logging.event_data: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/functionbeat + + # The name of the files where the logs are written to. + #name: functionbeat-event-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. 
All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to false. + # rotateonstartup: false + # ============================= X-Pack Monitoring ============================== # Functionbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/heartbeat/heartbeat.reference.yml b/x-pack/heartbeat/heartbeat.reference.yml index 7407d213748..3632ce12bbd 100644 --- a/x-pack/heartbeat/heartbeat.reference.yml +++ b/x-pack/heartbeat/heartbeat.reference.yml @@ -1636,6 +1636,54 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have it's own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. + +# logging.event_data: +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +#logging.event_data.to_files: true +#logging.event_data: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/heartbeat + + # The name of the files where the logs are written to. 
+ #name: heartbeat-event-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to false. + # rotateonstartup: false + # ============================= X-Pack Monitoring ============================== # Heartbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index 6877f2b4534..1d2957306db 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -2982,6 +2982,54 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. 
It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have its own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. + +# logging.event_data: +# Logging to rotating files. Set logging.event_data.to_files to false to disable logging to +# files. +#logging.event_data.to_files: true +#logging.event_data: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/metricbeat + + # The name of the files where the logs are written to. + #name: metricbeat-event-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to false. + # rotateonstartup: false + # ============================= X-Pack Monitoring ============================== # Metricbeat can export internal metrics to a central Elasticsearch monitoring # cluster. 
This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/osquerybeat/osquerybeat.reference.yml b/x-pack/osquerybeat/osquerybeat.reference.yml index 0c28af89144..7e8ccd5e842 100644 --- a/x-pack/osquerybeat/osquerybeat.reference.yml +++ b/x-pack/osquerybeat/osquerybeat.reference.yml @@ -983,6 +983,54 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have its own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. + +# logging.event_data: +# Logging to rotating files. Set logging.event_data.to_files to false to disable logging to +# files. +#logging.event_data.to_files: true +#logging.event_data: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/osquerybeat + + # The name of the files where the logs are written to. + #name: osquerybeat-event-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. 
+ # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to false. + # rotateonstartup: false + # ============================= X-Pack Monitoring ============================== # Osquerybeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/packetbeat/packetbeat.reference.yml b/x-pack/packetbeat/packetbeat.reference.yml index 6eaee863da0..7041e79ea74 100644 --- a/x-pack/packetbeat/packetbeat.reference.yml +++ b/x-pack/packetbeat/packetbeat.reference.yml @@ -2015,6 +2015,54 @@ logging.files: # file. Defaults to true. # rotateonstartup: true +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have its own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. 
+ +# logging.event_data: +# Logging to rotating files. Set logging.event_data.to_files to false to disable logging to +# files. +#logging.event_data.to_files: true +#logging.event_data: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/packetbeat + + # The name of the files where the logs are written to. + #name: packetbeat-event-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to false. + # rotateonstartup: false + # ============================= X-Pack Monitoring ============================== # Packetbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The diff --git a/x-pack/winlogbeat/winlogbeat.reference.yml b/x-pack/winlogbeat/winlogbeat.reference.yml index 5bc8f774e03..96e912ea41c 100644 --- a/x-pack/winlogbeat/winlogbeat.reference.yml +++ b/x-pack/winlogbeat/winlogbeat.reference.yml @@ -1428,6 +1428,54 @@ logging.files: # file. Defaults to true. 
# rotateonstartup: true +#=============================== Events Logging =============================== +# Some outputs will log raw events on errors like indexing errors in the +# Elasticsearch output, to prevent logging raw events (that may contain +# sensitive information) together with other log messages, a different +# log file, only for log entries containing raw events, is used. It will +# use the same level, selectors and all other configurations from the +# default logger, but it will have its own file configuration. +# +# Having a different log file for raw events also prevents event data +# from drowning out the regular log files. +# +# IMPORTANT: No matter the default logger output configuration, raw events +# will **always** be logged to a file configured by `logging.event_data.files`. + +# logging.event_data: +# Logging to rotating files. Set logging.event_data.to_files to false to disable logging to +# files. +#logging.event_data.to_files: true +#logging.event_data: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/winlogbeat + + # The name of the files where the logs are written to. + #name: winlogbeat-event-data + + # Configure log file size limit. If the limit is reached, log file will be + # automatically rotated. + #rotateeverybytes: 5242880 # = 5MB + + # Number of rotated log files to keep. The oldest files will be deleted first. + #keepfiles: 2 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to the size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. 
All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending them to the existing + # file. Defaults to false. + # rotateonstartup: false + # ============================= X-Pack Monitoring ============================== # Winlogbeat can export internal metrics to a central Elasticsearch monitoring # cluster. This requires xpack monitoring to be enabled in Elasticsearch. The