@@ -18,6 +18,9 @@ use tracing::trace;
pub trait RetryPolicy<E>: Send + Sync + Debug {
    /// Whether to retry the request based on the given `error`
    fn should_retry(&self, error: &E) -> bool;
+
+    /// Providers may include the `backoff` in the error response directly
+    fn backoff_hint(&self, error: &E) -> Option<Duration>;
}

/// [RetryClient] presents as a wrapper around [JsonRpcClient] that will retry
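
Any downstream `RetryPolicy` implementation now has to supply `backoff_hint` as well as `should_retry`. Below is a rough, self-contained sketch of what that looks like for a custom policy; the `MyError` type, its fields, and `MyPolicy` are made up for illustration, only the trait shape itself comes from this diff.

```rust
use std::{fmt::Debug, time::Duration};

// The trait as it looks after this change (copied from the hunk above).
pub trait RetryPolicy<E>: Send + Sync + Debug {
    /// Whether to retry the request based on the given `error`
    fn should_retry(&self, error: &E) -> bool;

    /// Providers may include the `backoff` in the error response directly
    fn backoff_hint(&self, error: &E) -> Option<Duration>;
}

// Hypothetical error type, for illustration only.
#[derive(Debug)]
struct MyError {
    rate_limited: bool,
    retry_after_secs: Option<u64>,
}

#[derive(Debug)]
struct MyPolicy;

impl RetryPolicy<MyError> for MyPolicy {
    fn should_retry(&self, error: &MyError) -> bool {
        error.rate_limited
    }

    fn backoff_hint(&self, error: &MyError) -> Option<Duration> {
        // Prefer a server-provided backoff when present; the caller falls back
        // to its own exponential backoff otherwise.
        error.retry_after_secs.map(Duration::from_secs)
    }
}
```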
@@ -282,13 +285,18 @@ where
            }

            let current_queued_requests = self.requests_enqueued.load(Ordering::SeqCst) as u64;
-            // using `retry_number` for creating back pressure because
-            // of already queued requests
-            // this increases exponentially with retries and adds a delay based on how many
-            // requests are currently queued
-            let mut next_backoff = Duration::from_millis(
-                self.initial_backoff.as_millis().pow(rate_limit_retry_number) as u64,
-            );
+
+            // try to extract the requested backoff from the error or compute the next backoff
+            // based on retry count
+            let mut next_backoff = self.policy.backoff_hint(&err).unwrap_or_else(|| {
+                // using `retry_number` for creating back pressure because
+                // of already queued requests
+                // this increases exponentially with retries and adds a delay based on how many
+                // requests are currently queued
+                Duration::from_millis(
+                    self.initial_backoff.as_millis().pow(rate_limit_retry_number) as u64,
+                )
+            });

            // requests are usually weighted and can vary from 10 CU to several 100 CU, cheaper
            // requests are more common some example alchemy weights:
@@ -299,13 +307,13 @@ where
            // (coming from forking mode) assuming here that storage request will be the driver
            // for Rate limits we choose `17` as the average cost of any request
            const AVG_COST: u64 = 17u64;
-            let seconds_to_wait_for_compute_budge = compute_unit_offset_in_secs(
+            let seconds_to_wait_for_compute_budget = compute_unit_offset_in_secs(
                AVG_COST,
                self.compute_units_per_second,
                current_queued_requests,
                ahead_in_queue,
            );
-            next_backoff += Duration::from_secs(seconds_to_wait_for_compute_budge);
+            next_backoff += Duration::from_secs(seconds_to_wait_for_compute_budget);

            trace!("retrying and backing off for {:?}", next_backoff);
            tokio::time::sleep(next_backoff).await;
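
To make the delay arithmetic above concrete: a server-provided hint wins when present; otherwise the exponential term `initial_backoff.as_millis().pow(rate_limit_retry_number)` is used, and the compute-budget wait is then added on top in whole seconds. The following is a minimal standalone sketch with made-up numbers (50 ms initial backoff, retry number 2, a 1 second compute-budget wait) that mirrors the expressions as written rather than calling into the client itself.

```rust
use std::time::Duration;

fn main() {
    let initial_backoff = Duration::from_millis(50);
    let rate_limit_retry_number: u32 = 2;

    // exponential component, exactly as in the diff: millis raised to the retry number
    let mut next_backoff =
        Duration::from_millis(initial_backoff.as_millis().pow(rate_limit_retry_number) as u64);
    assert_eq!(next_backoff, Duration::from_millis(2_500)); // 50^2 ms

    // compute-budget component, added on top in whole seconds
    let seconds_to_wait_for_compute_budget = 1;
    next_backoff += Duration::from_secs(seconds_to_wait_for_compute_budget);
    assert_eq!(next_backoff, Duration::from_millis(3_500));
}
```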
@@ -339,20 +347,41 @@ impl RetryPolicy<ClientError> for HttpRateLimitRetryPolicy {
            ClientError::ReqwestError(err) => {
                err.status() == Some(http::StatusCode::TOO_MANY_REQUESTS)
            }
-            ClientError::JsonRpcError(JsonRpcError { code, message, data: _ }) => {
+            ClientError::JsonRpcError(JsonRpcError { code, message, .. }) => {
                // alchemy throws it this way
                if *code == 429 {
                    return true
                }
-                // this is commonly thrown by infura and is apparently a load balancer issue, see also <https://github.com/MetaMask/metamask-extension/issues/7234>
-                if message == "header not found" {
-                    return true
+                match message.as_str() {
+                    // this is commonly thrown by infura and is apparently a load balancer issue, see also <https://github.com/MetaMask/metamask-extension/issues/7234>
+                    "header not found" => true,
+                    // also thrown by infura if out of budget for the day and ratelimited
+                    "daily request count exceeded, request rate limited" => true,
+                    _ => false,
                }
-                false
            }
            _ => false,
        }
    }
+
+    fn backoff_hint(&self, error: &ClientError) -> Option<Duration> {
+        if let ClientError::JsonRpcError(JsonRpcError { data, .. }) = error {
+            let data = data.as_ref()?;
+
+            // if daily rate limit exceeded, infura returns the requested backoff in the error
+            // response
+            let backoff_seconds = &data["rate"]["backoff_seconds"];
+            // infura rate limit error
+            if let Some(seconds) = backoff_seconds.as_u64() {
+                return Some(Duration::from_secs(seconds))
+            }
+            if let Some(seconds) = backoff_seconds.as_f64() {
+                return Some(Duration::from_secs(seconds as u64 + 1))
+            }
+        }
+
+        None
+    }
}

/// Calculates an offset in seconds by taking into account the number of currently queued requests,
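
The `backoff_hint` parsing above leans on serde_json's indexing behaviour: indexing a `Value` with a missing key, or indexing a non-object, yields `Value::Null`, so `as_u64()`/`as_f64()` simply return `None` and no hint is produced. The snippet below is a standalone sketch of that lookup against a payload shaped like the Infura response used in the test added at the end of this diff; it is illustrative only, not the library code itself.

```rust
use serde_json::{json, Value};
use std::time::Duration;

fn main() {
    // Error payload of the same shape as the test fixture below; `backoff_seconds`
    // may arrive as an integer or a float.
    let data: Value = json!({
        "rate": { "allowed_rps": 1, "backoff_seconds": 30, "current_rps": 1.1 },
        "see": "https://infura.io/dashboard"
    });

    // Missing keys or non-object values index to Value::Null rather than panicking.
    let backoff_seconds = &data["rate"]["backoff_seconds"];
    let hint = backoff_seconds
        .as_u64()
        .map(Duration::from_secs)
        .or_else(|| backoff_seconds.as_f64().map(|s| Duration::from_secs(s as u64 + 1)));
    assert_eq!(hint, Some(Duration::from_secs(30)));

    // With unrelated data the lookup falls through and no hint is extracted.
    let data: Value = json!("blocked");
    assert!(data["rate"]["backoff_seconds"].as_u64().is_none());
}
```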
@@ -450,4 +479,25 @@ mod tests {
        // need to wait 1 second
        assert_eq!(to_wait, 1);
    }
+
+    #[test]
+    fn can_extract_backoff() {
+        let resp = r#"{"rate": {"allowed_rps": 1, "backoff_seconds": 30, "current_rps": 1.1}, "see": "https://infura.io/dashboard"}"#;
+
+        let err = ClientError::JsonRpcError(JsonRpcError {
+            code: 0,
+            message: "daily request count exceeded, request rate limited".to_string(),
+            data: Some(serde_json::from_str(resp).unwrap()),
+        });
+        let backoff = HttpRateLimitRetryPolicy.backoff_hint(&err).unwrap();
+        assert_eq!(backoff, Duration::from_secs(30));
+
+        let err = ClientError::JsonRpcError(JsonRpcError {
+            code: 0,
+            message: "daily request count exceeded, request rate limited".to_string(),
+            data: Some(serde_json::Value::String("blocked".to_string())),
+        });
+        let backoff = HttpRateLimitRetryPolicy.backoff_hint(&err);
+        assert!(backoff.is_none());
+    }
}