diff --git a/docs/en/ingest-management/elastic-agent/configuration/outputs/output-kafka.asciidoc b/docs/en/ingest-management/elastic-agent/configuration/outputs/output-kafka.asciidoc index 839a2c043..0845fec2c 100644 --- a/docs/en/ingest-management/elastic-agent/configuration/outputs/output-kafka.asciidoc +++ b/docs/en/ingest-management/elastic-agent/configuration/outputs/output-kafka.asciidoc @@ -44,6 +44,29 @@ outputs: verification_mode: full ---- +== Kafka output and using {ls} to index data to {es} + +If you are considering using {ls} to ship the data from `kafka` to {es}, please +be aware Elastic is not currently testing this kind of setup. + +The structure of the documents sent from {agent} to `kafka` must not be modified by {ls}. +We suggest disabling `ecs_compatibility` on both the `kafka` input and the `json` codec. + +Refer to the <> documentation for more details. + +[source,yaml] +---- +input { + kafka { + ... + ecs_compatibility => "disabled" + codec => json { ecs_compatibility => "disabled" } + ... + } +} +... +---- + == Kafka output configuration settings The `kafka` output supports the following settings, grouped by category. @@ -502,4 +525,4 @@ Note: If set to 0, no ACKs are returned by Kafka.
Messages might be lost silently. // ============================================================================= -|=== \ No newline at end of file +|=== diff --git a/docs/en/ingest-management/elastic-agent/configuration/outputs/output-logstash.asciidoc b/docs/en/ingest-management/elastic-agent/configuration/outputs/output-logstash.asciidoc index aec36d059..71dc07094 100644 --- a/docs/en/ingest-management/elastic-agent/configuration/outputs/output-logstash.asciidoc +++ b/docs/en/ingest-management/elastic-agent/configuration/outputs/output-logstash.asciidoc @@ -32,8 +32,9 @@ To receive the events in {ls}, you also need to create a {ls} configuration pipe The {ls} configuration pipeline listens for incoming {agent} connections, processes received events, and then sends the events to {es}. -The following example configures a {ls} pipeline that listens on port `5044` for -incoming {agent} connections and routes received events to {es}: +The following {ls} pipeline definition example configures a pipeline that listens on port `5044` for +incoming {agent} connections and routes received events to {es}. + [source,yaml] ---- @@ -41,19 +42,27 @@ input { elastic_agent { port => 5044 enrich => none # don't modify the events' schema at all - # or minimal change, add only ssl and source metadata - # enrich => [ssl_peer_metadata, source_metadata] + ssl => true + ssl_certificate_authorities => [""] + ssl_certificate => "" + ssl_key => "" + ssl_verify_mode => "force_peer" } } output { elasticsearch { hosts => ["http://localhost:9200"] <1> + # cloud_id => "..." data_stream => "true" + api_key => "" <2> + ssl => true + # cacert => "" } } ---- <1> The {es} server and the port (`9200`) where {es} is running. +<2> The API Key used by {ls} to ship data to the destination data streams.
For more information about configuring {ls}, refer to {logstash-ref}/configuration.html[Configuring {ls}] and diff --git a/docs/en/ingest-management/fleet/fleet-settings-output-kafka.asciidoc b/docs/en/ingest-management/fleet/fleet-settings-output-kafka.asciidoc index d7f7b1a29..ea6f46fc4 100644 --- a/docs/en/ingest-management/fleet/fleet-settings-output-kafka.asciidoc +++ b/docs/en/ingest-management/fleet/fleet-settings-output-kafka.asciidoc @@ -5,6 +5,29 @@ Specify these settings to send data over a secure connection to Kafka. In the {fleet} <>, make sure that the Kafka output type is selected. +== Kafka output and using {ls} to index data to {es} + +If you are considering using {ls} to ship the data from `kafka` to {es}, please +be aware Elastic is not currently testing this kind of setup. + +The structure of the documents sent from {agent} to `kafka` must not be modified by {ls}. +We suggest disabling `ecs_compatibility` on both the `kafka` input and the `json` codec. + +Refer to the <> documentation for more details. + +[source,yaml] +---- +input { + kafka { + ... + ecs_compatibility => "disabled" + codec => json { ecs_compatibility => "disabled" } + ... + } +} +... +---- + [discrete] == General settings diff --git a/docs/en/ingest-management/fleet/fleet-settings-output-logstash.asciidoc b/docs/en/ingest-management/fleet/fleet-settings-output-logstash.asciidoc index b96778f78..66fdea424 100644 --- a/docs/en/ingest-management/fleet/fleet-settings-output-logstash.asciidoc +++ b/docs/en/ingest-management/fleet/fleet-settings-output-logstash.asciidoc @@ -13,6 +13,43 @@ Before using the {ls} output, you need to make sure that for any integrations th To learn how to generate certificates, refer to <>. +To receive the events in {ls}, you also need to create a {ls} configuration pipeline. +The {ls} configuration pipeline listens for incoming {agent} connections, +processes received events, and then sends the events to {es}.
+ +The following example configures a {ls} pipeline that listens on port `5044` for +incoming {agent} connections and routes received events to {es}. + +The {ls} pipeline definition below is an example. Please refer to the `Additional Logstash +configuration required` steps when creating the {ls} output in the Fleet outputs page. + +[source,yaml] +---- +input { + elastic_agent { + port => 5044 + enrich => none # don't modify the events' schema at all + ssl => true + ssl_certificate_authorities => [""] + ssl_certificate => "" + ssl_key => "" + ssl_verify_mode => "force_peer" + } +} +output { + elasticsearch { + hosts => ["http://localhost:9200"] <1> + # cloud_id => "..." + data_stream => "true" + api_key => "" <2> + ssl => true + # cacert => "" + } +} +---- +<1> The {es} server and the port (`9200`) where {es} is running. +<2> The API Key obtained from the {ls} output creation steps in Fleet. + [cols="2*