This repository was archived by the owner on Oct 19, 2024. It is now read-only.

Commit f208fb9

feat: detect requested backoff (#1711)
1 parent 74272ca commit f208fb9

File tree

1 file changed: +64 −14 lines


ethers-providers/src/transports/retry.rs

+64-14
@@ -18,6 +18,9 @@ use tracing::trace;
 pub trait RetryPolicy<E>: Send + Sync + Debug {
     /// Whether to retry the request based on the given `error`
     fn should_retry(&self, error: &E) -> bool;
+
+    /// Providers may include the `backoff` in the error response directly
+    fn backoff_hint(&self, error: &E) -> Option<Duration>;
 }
 
 /// [RetryClient] presents as a wrapper around [JsonRpcClient] that will retry
@@ -282,13 +285,18 @@ where
                 }
 
                 let current_queued_requests = self.requests_enqueued.load(Ordering::SeqCst) as u64;
-                // using `retry_number` for creating back pressure because
-                // of already queued requests
-                // this increases exponentially with retries and adds a delay based on how many
-                // requests are currently queued
-                let mut next_backoff = Duration::from_millis(
-                    self.initial_backoff.as_millis().pow(rate_limit_retry_number) as u64,
-                );
+
+                // try to extract the requested backoff from the error or compute the next backoff
+                // based on retry count
+                let mut next_backoff = self.policy.backoff_hint(&err).unwrap_or_else(|| {
+                    // using `retry_number` for creating back pressure because
+                    // of already queued requests
+                    // this increases exponentially with retries and adds a delay based on how many
+                    // requests are currently queued
+                    Duration::from_millis(
+                        self.initial_backoff.as_millis().pow(rate_limit_retry_number) as u64,
+                    )
+                });
 
                 // requests are usually weighted and can vary from 10 CU to several 100 CU, cheaper
                 // requests are more common some example alchemy weights:
@@ -299,13 +307,13 @@ where
                 // (coming from forking mode) assuming here that storage request will be the driver
                 // for Rate limits we choose `17` as the average cost of any request
                 const AVG_COST: u64 = 17u64;
-                let seconds_to_wait_for_compute_budge = compute_unit_offset_in_secs(
+                let seconds_to_wait_for_compute_budget = compute_unit_offset_in_secs(
                     AVG_COST,
                     self.compute_units_per_second,
                     current_queued_requests,
                     ahead_in_queue,
                 );
-                next_backoff += Duration::from_secs(seconds_to_wait_for_compute_budge);
+                next_backoff += Duration::from_secs(seconds_to_wait_for_compute_budget);
 
                 trace!("retrying and backing off for {:?}", next_backoff);
                 tokio::time::sleep(next_backoff).await;
@@ -339,20 +347,41 @@ impl RetryPolicy<ClientError> for HttpRateLimitRetryPolicy {
             ClientError::ReqwestError(err) => {
                 err.status() == Some(http::StatusCode::TOO_MANY_REQUESTS)
             }
-            ClientError::JsonRpcError(JsonRpcError { code, message, data: _ }) => {
+            ClientError::JsonRpcError(JsonRpcError { code, message, .. }) => {
                 // alchemy throws it this way
                 if *code == 429 {
                     return true
                 }
-                // this is commonly thrown by infura and is apparently a load balancer issue, see also <https://github.com/MetaMask/metamask-extension/issues/7234>
-                if message == "header not found" {
-                    return true
+                match message.as_str() {
+                    // this is commonly thrown by infura and is apparently a load balancer issue, see also <https://github.com/MetaMask/metamask-extension/issues/7234>
+                    "header not found" => true,
+                    // also thrown by infura if out of budget for the day and ratelimited
+                    "daily request count exceeded, request rate limited" => true,
+                    _ => false,
                 }
-                false
             }
             _ => false,
         }
     }
+
+    fn backoff_hint(&self, error: &ClientError) -> Option<Duration> {
+        if let ClientError::JsonRpcError(JsonRpcError { data, .. }) = error {
+            let data = data.as_ref()?;
+
+            // if daily rate limit exceeded, infura returns the requested backoff in the error
+            // response
+            let backoff_seconds = &data["rate"]["backoff_seconds"];
+            // infura rate limit error
+            if let Some(seconds) = backoff_seconds.as_u64() {
+                return Some(Duration::from_secs(seconds))
+            }
+            if let Some(seconds) = backoff_seconds.as_f64() {
+                return Some(Duration::from_secs(seconds as u64 + 1))
+            }
+        }
+
+        None
+    }
 }
 
 /// Calculates an offset in seconds by taking into account the number of currently queued requests,
@@ -450,4 +479,25 @@ mod tests {
         // need to wait 1 second
         assert_eq!(to_wait, 1);
     }
+
+    #[test]
+    fn can_extract_backoff() {
+        let resp = r#"{"rate": {"allowed_rps": 1, "backoff_seconds": 30, "current_rps": 1.1}, "see": "https://infura.io/dashboard"}"#;
+
+        let err = ClientError::JsonRpcError(JsonRpcError {
+            code: 0,
+            message: "daily request count exceeded, request rate limited".to_string(),
+            data: Some(serde_json::from_str(resp).unwrap()),
+        });
+        let backoff = HttpRateLimitRetryPolicy.backoff_hint(&err).unwrap();
+        assert_eq!(backoff, Duration::from_secs(30));
+
+        let err = ClientError::JsonRpcError(JsonRpcError {
+            code: 0,
+            message: "daily request count exceeded, request rate limited".to_string(),
+            data: Some(serde_json::Value::String("blocked".to_string())),
+        });
+        let backoff = HttpRateLimitRetryPolicy.backoff_hint(&err);
+        assert!(backoff.is_none());
+    }
 }
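
For downstream users who implement their own RetryPolicy, the sketch below shows how the new backoff_hint method fits together with should_retry. It is a minimal, self-contained example, not the crate's code: the trait is mirrored from the diff above, ProviderError and MyRetryPolicy are hypothetical stand-ins for a real transport error type and policy, and the JSON lookup assumes the same {"rate": {"backoff_seconds": ...}} payload shape exercised by the can_extract_backoff test.

use std::fmt::Debug;
use std::time::Duration;

// Trait mirrored from the diff above (defined in ethers-providers in the real crate).
pub trait RetryPolicy<E>: Send + Sync + Debug {
    /// Whether to retry the request based on the given `error`
    fn should_retry(&self, error: &E) -> bool;
    /// Providers may include the `backoff` in the error response directly
    fn backoff_hint(&self, error: &E) -> Option<Duration>;
}

/// Hypothetical JSON-RPC error type standing in for `JsonRpcError`.
#[derive(Debug)]
pub struct ProviderError {
    pub code: i64,
    pub message: String,
    pub data: Option<serde_json::Value>,
}

/// Hypothetical policy that retries on 429-style codes and honors a
/// provider-supplied backoff, in the spirit of `HttpRateLimitRetryPolicy`.
#[derive(Debug, Default)]
pub struct MyRetryPolicy;

impl RetryPolicy<ProviderError> for MyRetryPolicy {
    fn should_retry(&self, error: &ProviderError) -> bool {
        // retry only on rate-limit errors, as the diff does for code 429
        error.code == 429
    }

    fn backoff_hint(&self, error: &ProviderError) -> Option<Duration> {
        // assumes the same payload shape as the Infura response in the test:
        // {"rate": {"backoff_seconds": 30, ...}}
        let data = error.data.as_ref()?;
        let backoff_seconds = &data["rate"]["backoff_seconds"];
        if let Some(seconds) = backoff_seconds.as_u64() {
            return Some(Duration::from_secs(seconds));
        }
        if let Some(seconds) = backoff_seconds.as_f64() {
            // round fractional seconds up so the wait never undershoots the hint
            return Some(Duration::from_secs(seconds as u64 + 1));
        }
        None
    }
}

When a hint is present, the retry loop sleeps for the provider-requested duration; when it is absent, it falls back to the exponential delay derived from initial_backoff and the retry count, plus the compute-unit offset, as shown in the second hunk above.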
