From 0ed736af874e36bb8e6faf18da7ee2f4b432f336 Mon Sep 17 00:00:00 2001
From: Marc Lopez Rubio
Date: Mon, 8 Jul 2024 14:32:29 +0800
Subject: [PATCH 1/3] output: Retry document-level `429`s by default

Updates the APM Server to automatically retry document-level `429`s from
Elasticsearch to avoid dropping data. It can be configured/overwritten by
`output.elasticsearch.max_retries`, and defaults to `3`.

It uses the default backoff configuration, which could wait up to 1m if
enough retries are configured, but can be overwritten as well.

Signed-off-by: Marc Lopez Rubio
---
 internal/beater/beater.go      | 15 ++++++++++-----
 internal/beater/beater_test.go | 32 ++++++++++++++++++--------------
 2 files changed, 28 insertions(+), 19 deletions(-)

diff --git a/internal/beater/beater.go b/internal/beater/beater.go
index 2071e11f84b..0c715af5223 100644
--- a/internal/beater/beater.go
+++ b/internal/beater/beater.go
@@ -723,7 +723,7 @@ func (s *Runner) newFinalBatchProcessor(
 func (s *Runner) newDocappenderConfig(tracer *apm.Tracer, memLimit float64) (
     docappender.Config, *elasticsearch.Config, error,
 ) {
-    var esConfig struct {
+    esConfig := struct {
         *elasticsearch.Config `config:",inline"`
         FlushBytes            string        `config:"flush_bytes"`
         FlushInterval         time.Duration `config:"flush_interval"`
@@ -731,11 +731,12 @@ func (s *Runner) newDocappenderConfig(tracer *apm.Tracer, memLimit float64) (
         Scaling struct {
             Enabled *bool `config:"enabled"`
         } `config:"autoscaling"`
+    }{
+        // Default to 1mib flushes, which is the default for go-docappender.
+        FlushBytes:    "1 mib",
+        FlushInterval: time.Second,
+        Config:        elasticsearch.DefaultConfig(),
     }
-    // Default to 1mib flushes, which is the default for go-docappender.
-    esConfig.FlushBytes = "1 mib"
-    esConfig.FlushInterval = time.Second
-    esConfig.Config = elasticsearch.DefaultConfig()
     esConfig.MaxIdleConnsPerHost = 10

     if err := s.elasticsearchOutputConfig.Unpack(&esConfig); err != nil {
@@ -768,6 +769,10 @@ func (s *Runner) newDocappenderConfig(tracer *apm.Tracer, memLimit float64) (
         Scaling:           scalingCfg,
         Logger:            zap.New(s.logger.Core(), zap.WithCaller(true)),
         RequireDataStream: true,
+        // Use the output's max_retries to configure the go-docappender's
+        // document level retries.
+        MaxDocumentRetries:    esConfig.MaxRetries,
+        RetryOnDocumentStatus: []int{429}, // Only retry "safe" 429 responses.
     }, memLimit, s.logger)
     if cfg.MaxRequests != 0 {
         esConfig.MaxIdleConnsPerHost = cfg.MaxRequests
diff --git a/internal/beater/beater_test.go b/internal/beater/beater_test.go
index 3e911b35d8b..b035fe9c8cf 100644
--- a/internal/beater/beater_test.go
+++ b/internal/beater/beater_test.go
@@ -177,13 +177,15 @@ func TestRunnerNewDocappenderConfig(t *testing.T) {
             docCfg, esCfg, err := r.newDocappenderConfig(nil, c.memSize)
             require.NoError(t, err)
             assert.Equal(t, docappender.Config{
-                Logger:             zap.New(r.logger.Core(), zap.WithCaller(true)),
-                CompressionLevel:   5,
-                RequireDataStream:  true,
-                FlushInterval:      time.Second,
-                FlushBytes:         1024 * 1024,
-                MaxRequests:        c.wantMaxRequests,
-                DocumentBufferSize: c.wantDocBufSize,
+                Logger:                zap.New(r.logger.Core(), zap.WithCaller(true)),
+                CompressionLevel:      5,
+                RequireDataStream:     true,
+                FlushInterval:         time.Second,
+                FlushBytes:            1024 * 1024,
+                MaxRequests:           c.wantMaxRequests,
+                DocumentBufferSize:    c.wantDocBufSize,
+                MaxDocumentRetries:    3,
+                RetryOnDocumentStatus: []int{429},
             }, docCfg)
             assert.Equal(t, &elasticsearch.Config{
                 Hosts: elasticsearch.Hosts{"localhost:9200"},
@@ -207,13 +209,15 @@ func TestRunnerNewDocappenderConfig(t *testing.T) {
             docCfg, esCfg, err := r.newDocappenderConfig(nil, c.memSize)
             require.NoError(t, err)
             assert.Equal(t, docappender.Config{
-                Logger:             zap.New(r.logger.Core(), zap.WithCaller(true)),
-                CompressionLevel:   5,
-                RequireDataStream:  true,
-                FlushInterval:      2 * time.Second,
-                FlushBytes:         500 * 1024,
-                MaxRequests:        50,
-                DocumentBufferSize: c.wantDocBufSize,
+                Logger:                zap.New(r.logger.Core(), zap.WithCaller(true)),
+                CompressionLevel:      5,
+                RequireDataStream:     true,
+                FlushInterval:         2 * time.Second,
+                FlushBytes:            500 * 1024,
+                MaxRequests:           50,
+                DocumentBufferSize:    c.wantDocBufSize,
+                MaxDocumentRetries:    3,
+                RetryOnDocumentStatus: []int{429},
             }, docCfg)
             assert.Equal(t, &elasticsearch.Config{
                 Hosts: elasticsearch.Hosts{"localhost:9200"},

From d836b3c73c606acbd89498147f2cdd94a973cd22 Mon Sep 17 00:00:00 2001
From: Marc Lopez Rubio
Date: Mon, 8 Jul 2024 15:28:11 +0800
Subject: [PATCH 2/3] Add changelog

Signed-off-by: Marc Lopez Rubio
---
 changelogs/head.asciidoc | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/changelogs/head.asciidoc b/changelogs/head.asciidoc
index 4290a0f40e4..06a6f9520e9 100644
--- a/changelogs/head.asciidoc
+++ b/changelogs/head.asciidoc
@@ -16,4 +16,6 @@ https://github.com/elastic/apm-server/compare/8.15\...main[View commits]
 ==== Intake API Changes

 [float]
-==== Added
\ No newline at end of file
+==== Added
+
+- APM Server now automatically retries document-level 429s from Elasticsearch to avoid dropping data. This is controlled by `output.elasticsearch.max_retries`, and defaults to `3`. {pull}13620[13620]

From ae2db84138a9b8df60c43b7b927f85b5eaaed0be Mon Sep 17 00:00:00 2001
From: Marc Lopez Rubio
Date: Mon, 8 Jul 2024 15:37:22 +0800
Subject: [PATCH 3/3] Update changelogs/head.asciidoc

Co-authored-by: Carson Ip
---
 changelogs/head.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/changelogs/head.asciidoc b/changelogs/head.asciidoc
index 06a6f9520e9..6177cd88a92 100644
--- a/changelogs/head.asciidoc
+++ b/changelogs/head.asciidoc
@@ -18,4 +18,4 @@ https://github.com/elastic/apm-server/compare/8.15\...main[View commits]
 [float]
 ==== Added

-- APM Server now automatically retries document-level 429s from Elasticsearch to avoid dropping data. This is controlled by `output.elasticsearch.max_retries`, and defaults to `3`. {pull}13620[13620]
+- APM Server now automatically retries document-level 429s from Elasticsearch to avoid dropping data. `output.elasticsearch.max_retries` now controls both request-level and document-level retries, and defaults to `3`. {pull}13620[13620]
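
Note: the sketch below is not part of the patches above. It is a minimal, self-contained illustration of how the document-level retry settings introduced in PATCH 1/3 map onto a go-docappender configuration. The standalone main package and the module path github.com/elastic/go-docappender/v2 are assumptions made for this example; only the MaxDocumentRetries and RetryOnDocumentStatus fields are taken from the diff.

package main

import (
    "fmt"

    docappender "github.com/elastic/go-docappender/v2" // module path assumed for this sketch
)

func main() {
    // output.elasticsearch.max_retries (default 3) now drives the
    // document-level retry count in addition to request-level retries.
    maxRetries := 3

    cfg := docappender.Config{
        // Retry only documents rejected with HTTP 429 (Too Many Requests)
        // instead of dropping them; other statuses are still not retried.
        MaxDocumentRetries:    maxRetries,
        RetryOnDocumentStatus: []int{429},
    }

    fmt.Printf("retrying documents up to %d times on statuses %v\n",
        cfg.MaxDocumentRetries, cfg.RetryOnDocumentStatus)
}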