add retries to cagg httpSinkActor #124
base: master
Changes from 2 commits (PR commits: 6ac4fc1, 36013a9, cf2e798, 0639d72)
Changed file: HttpSinkActor
@@ -61,6 +61,7 @@ public class HttpSinkActor extends AbstractActor {
      * @param maximumQueueSize Maximum number of pending requests.
      * @param spreadPeriod Maximum time to delay sending new aggregates to spread load.
      * @param metricsFactory metrics factory to record metrics.
+     * @param maxRetries Maximum number of retries for the http requests.
      * @return A new Props
      */
     public static Props props(
@@ -69,8 +70,17 @@ public static Props props(
             final int maximumConcurrency,
             final int maximumQueueSize,
             final Period spreadPeriod,
-            final MetricsFactory metricsFactory) {
-        return Props.create(HttpSinkActor.class, client, sink, maximumConcurrency, maximumQueueSize, spreadPeriod, metricsFactory);
+            final MetricsFactory metricsFactory,
+            final int maxRetries) {
+        return Props.create(
+                HttpSinkActor.class,
+                client,
+                sink,
+                maximumConcurrency,
+                maximumQueueSize,
+                spreadPeriod,
+                metricsFactory,
+                maxRetries);
     }

     /**
@@ -82,19 +92,22 @@ public static Props props(
      * @param maximumQueueSize Maximum number of pending requests.
      * @param spreadPeriod Maximum time to delay sending new aggregates to spread load.
      * @param metricsFactory metrics factory to record metrics.
+     * @param maxRetries Maximum number of retries for the http requests.
      */
     public HttpSinkActor(
             final AsyncHttpClient client,
             final HttpPostSink sink,
             final int maximumConcurrency,
             final int maximumQueueSize,
             final Period spreadPeriod,
-            final MetricsFactory metricsFactory) {
+            final MetricsFactory metricsFactory,
+            final int maxRetries) {
         _client = client;
         _sink = sink;
         _maximumConcurrency = maximumConcurrency;
         _pendingRequests = EvictingQueue.create(maximumQueueSize);
         _metricsFactory = metricsFactory;
+        _maxRetries = maxRetries;
         if (Period.ZERO.equals(spreadPeriod)) {
             _spreadingDelayMillis = 0;
         } else {
@@ -106,6 +119,8 @@ public HttpSinkActor(
         _inQueueLatencyName = "sinks/http_post/" + _sink.getMetricSafeName() + "/queue_time";
         _requestSuccessName = "sinks/http_post/" + _sink.getMetricSafeName() + "/success";
         _responseStatusName = "sinks/http_post/" + _sink.getMetricSafeName() + "/status";
+        _httpSinkAttemptsName = "sinks/http_post/" + _sink.getMetricSafeName() + "/attempts";
+        _samplesDroppedName = "sinks/http_post/" + _sink.getMetricSafeName() + "/samples_dropped";
     }

     /**
@@ -276,9 +291,8 @@ private void fireNextRequest() {
         final Request request = requestEntry.getRequest();
         _inflightRequestsCount++;

-        final CompletableFuture<Response> promise = new CompletableFuture<>();
         metrics.startTimer(_requestLatencyName);
-        _client.executeRequest(request, new ResponseAsyncCompletionHandler(promise));
+        final CompletableFuture<Response> promise = sendHttpRequest(request, 0);
         final CompletionStage<Object> responsePromise = promise
                 .handle((result, err) -> {
                     metrics.stopTimer(_requestLatencyName);
@@ -295,9 +309,11 @@ private void fireNextRequest() {
                         returnValue = new PostSuccess(result);
                     } else {
                         returnValue = new PostRejected(request, result);
+                        metrics.incrementCounter(_samplesDroppedName);
                     }
                 } else {
                     returnValue = new PostFailure(request, err);
+                    metrics.incrementCounter(_samplesDroppedName);
                 }
                 metrics.incrementCounter(_requestSuccessName, (returnValue instanceof PostSuccess) ? 1 : 0);
                 metrics.close();
@@ -306,6 +322,24 @@ private void fireNextRequest() {
         PatternsCS.pipe(responsePromise, context().dispatcher()).to(self());
     }

+    private CompletableFuture<Response> sendHttpRequest(
+            final Request request,
+            final int attempt) {
+        final CompletableFuture<Response> promise = new CompletableFuture<>();
+        _client.executeRequest(request, new ResponseAsyncCompletionHandler(promise));
+        promise.handle((result, err) -> {
+            if (err == null && ACCEPTED_STATUS_CODES.contains(result.getStatusCode())) {
+                try (Metrics metrics = _metricsFactory.create()) {
+                    metrics.incrementCounter(_httpSinkAttemptsName, attempt);
+                }
+                return promise;
+            } else {
+                return attempt < _maxRetries ? sendHttpRequest(request, attempt + 1) : promise;
+            }
+        });
+        return promise;
+    }
+
     @Override
     public void postStop() throws Exception {
         super.postStop();

Review comment (on the try (Metrics metrics = _metricsFactory.create()) line): Creating a new Metrics object for each POST is going to be quite expensive.

Reply: Yes, it's not ideal; unfortunately there is no end-to-end scoped instrumentation in CAGG, and the event loop here is async and decoupled. If CAGG supported periodic metrics we could use that, but this is not unreasonable (there's an MF in the Router too, so one per incoming request; Ref).
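A rough sketch of the batching alternative the reply alludes to: accumulate attempt counts locally and flush them on a timer, so one Metrics instance is created per flush interval instead of per POST. This is not part of the PR; the class name, flush interval, and wiring are hypothetical, and the imports assume the com.arpnetworking.metrics client the sink already uses.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import com.arpnetworking.metrics.Metrics;
import com.arpnetworking.metrics.MetricsFactory;

// Hypothetical helper: record() is cheap and lock-free; a background task
// periodically drains the accumulated count into a single Metrics instance.
final class BatchedAttemptCounter {
    private final MetricsFactory _metricsFactory;
    private final String _metricName;
    private final AtomicLong _pendingAttempts = new AtomicLong();
    private final ScheduledExecutorService _flusher =
            Executors.newSingleThreadScheduledExecutor();

    BatchedAttemptCounter(final MetricsFactory metricsFactory, final String metricName) {
        _metricsFactory = metricsFactory;
        _metricName = metricName;
        // The 500ms flush interval is illustrative only.
        _flusher.scheduleAtFixedRate(this::flush, 500, 500, TimeUnit.MILLISECONDS);
    }

    void record(final long attempts) {
        _pendingAttempts.addAndGet(attempts);
    }

    private void flush() {
        final long attempts = _pendingAttempts.getAndSet(0);
        if (attempts > 0) {
            // One Metrics unit of work per interval rather than one per request.
            try (Metrics metrics = _metricsFactory.create()) {
                metrics.incrementCounter(_metricName, attempts);
            }
        }
    }
}

The actor could then call record(attempt) from sendHttpRequest instead of opening a new Metrics per successful POST.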
@@ -325,12 +359,15 @@ public void postStop() throws Exception {
     private final HttpPostSink _sink;
     private final int _spreadingDelayMillis;
     private final MetricsFactory _metricsFactory;
+    private final int _maxRetries;

     private final String _evictedRequestsName;
     private final String _requestLatencyName;
     private final String _inQueueLatencyName;
     private final String _requestSuccessName;
     private final String _responseStatusName;
+    private final String _httpSinkAttemptsName;
+    private final String _samplesDroppedName;

     private static final Logger LOGGER = LoggerFactory.getLogger(HttpPostSink.class);
     private static final Logger EVICTED_LOGGER = LoggerFactory.getRateLimitLogger(HttpPostSink.class, Duration.ofSeconds(30));
Changed file: sink unit test
@@ -63,7 +63,8 @@ public void setUp() {
                 .setName("kairosdb_sink_test")
                 .setActorSystem(getSystem())
                 .setUri(URI.create("http://localhost:" + _wireMockServer.port() + PATH))
-                .setMetricsFactory(_mockMetricsFactory);
+                .setMetricsFactory(_mockMetricsFactory)
+                .setMaxRetries(5);
         Mockito.doReturn(_mockMetrics).when(_mockMetricsFactory).create();
     }

@@ -134,7 +135,8 @@ public void testPost() throws InterruptedException, IOException {
         Assert.assertEquals(expected, actual);

         // Verify that metrics has been recorded.
-        Mockito.verify(_mockMetricsFactory, Mockito.times(1)).create();
+        Mockito.verify(_mockMetricsFactory, Mockito.times(2)).create();
+        Mockito.verify(_mockMetrics, Mockito.times(1)).incrementCounter("sinks/http_post/kairosdb_sink_test/attempts", 0);
         Mockito.verify(_mockMetrics, Mockito.times(1)).incrementCounter("sinks/http_post/kairosdb_sink_test/success", 1);
         Mockito.verify(_mockMetrics, Mockito.times(1)).incrementCounter("sinks/http_post/kairosdb_sink_test/status/2xx", 1);
         Mockito.verify(_mockMetrics, Mockito.times(1)).setTimer(
@@ -143,7 +145,7 @@ public void testPost() throws InterruptedException, IOException {
                 Mockito.any());
         Mockito.verify(_mockMetrics, Mockito.times(1)).startTimer("sinks/http_post/kairosdb_sink_test/request_latency");
         Mockito.verify(_mockMetrics, Mockito.times(1)).stopTimer("sinks/http_post/kairosdb_sink_test/request_latency");
-        Mockito.verify(_mockMetrics, Mockito.times(1)).close();
+        Mockito.verify(_mockMetrics, Mockito.times(2)).close();
     }

     private KairosDbSink.Builder _kairosDbSinkBuilder;

Review comment (on testPost): Looks like you're missing a test case for the feature you're trying to add. Can you please add a test showing the retry on a failed POST?
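A hedged sketch of what such a retry test might look like, reusing the existing test fixtures (_wireMockServer, _mockMetrics, PATH) and WireMock's scenario support; the scenario names, the elided sink-driving code, and the exact metric expectations are illustrative assumptions rather than the PR's actual test.

@Test
public void testRetryOnFailedPost() throws InterruptedException, IOException {
    // First POST returns 500, the second one succeeds, so the actor should retry once.
    _wireMockServer.stubFor(
            WireMock.post(WireMock.urlEqualTo(PATH))
                    .inScenario("retry")
                    .whenScenarioStateIs(Scenario.STARTED)
                    .willReturn(WireMock.aResponse().withStatus(500))
                    .willSetStateTo("second_attempt"));
    _wireMockServer.stubFor(
            WireMock.post(WireMock.urlEqualTo(PATH))
                    .inScenario("retry")
                    .whenScenarioStateIs("second_attempt")
                    .willReturn(WireMock.aResponse().withStatus(200)));

    // ... build the sink from _kairosDbSinkBuilder, record one sample, and wait for
    // the actor to drain, following the same pattern as testPost ...

    // Both the failed attempt and the retry should have reached the server.
    _wireMockServer.verify(2, WireMock.postRequestedFor(WireMock.urlEqualTo(PATH)));
    // The attempts counter should be recorded with 1, i.e. success on the second (zero-based) attempt.
    Mockito.verify(_mockMetrics, Mockito.times(1))
            .incrementCounter("sinks/http_post/kairosdb_sink_test/attempts", 1);
}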
Review comment: We're probably going to need a bit of a backoff here. I would recommend using an exponential backoff strategy.

Review comment: At least adding some support for backoff would be nice, but I'm also okay with adding that in a second step.
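For reference, a minimal standalone sketch of the exponential backoff with jitter the reviewers suggest; it is not part of the PR. The helper name, the dedicated ScheduledExecutorService (rather than the actor system's scheduler), and the delay constants are all illustrative assumptions, and a real version would also treat non-accepted status codes as retryable, as the PR's sendHttpRequest does.

import java.util.Random;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public final class BackoffRetry {

    private static final ScheduledExecutorService SCHEDULER =
            Executors.newSingleThreadScheduledExecutor();
    private static final Random RANDOM = new Random();

    private BackoffRetry() {}

    /**
     * Retries an asynchronous call with exponential backoff plus jitter, e.g.
     * withBackoff(() -> executeOnce(request), 0, maxRetries, 100) where
     * executeOnce performs a single, non-retrying POST (hypothetical helper).
     */
    public static <T> CompletableFuture<T> withBackoff(
            final Supplier<CompletableFuture<T>> call,
            final int attempt,
            final int maxRetries,
            final long baseDelayMillis) {
        return call.get()
                .handle((result, error) -> {
                    if (error == null) {
                        // Success: pass the result straight through.
                        return CompletableFuture.completedFuture(result);
                    }
                    if (attempt >= maxRetries) {
                        // Retries exhausted: surface the last failure.
                        final CompletableFuture<T> failed = new CompletableFuture<>();
                        failed.completeExceptionally(error);
                        return failed;
                    }
                    // Exponential backoff: base * 2^attempt, with up to 50% random jitter.
                    final long delay = baseDelayMillis << attempt;
                    final long jitter = (long) (RANDOM.nextDouble() * delay * 0.5);
                    final CompletableFuture<T> next = new CompletableFuture<>();
                    SCHEDULER.schedule(
                            () -> {
                                withBackoff(call, attempt + 1, maxRetries, baseDelayMillis)
                                        .whenComplete((r, e) -> {
                                            if (e == null) {
                                                next.complete(r);
                                            } else {
                                                next.completeExceptionally(e);
                                            }
                                        });
                            },
                            delay + jitter,
                            TimeUnit.MILLISECONDS);
                    return next;
                })
                .thenCompose(future -> future);
    }
}

Inside the actor, the scheduling would more naturally go through context().system().scheduler() so no extra thread pool is needed, but the retry and delay logic would be the same.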