diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs
index cf5610df6657..42e37b967e4c 100644
--- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs
@@ -1,42 +1,41 @@
// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
+// This file is part of Cumulus.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
//! Autogenerated weights for `pallet_scheduler`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
+//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024
// Executed Command:
-// ./target/production/polkadot-parachain
+// target/production/polkadot-parachain
// benchmark
// pallet
-// --chain=collectives-polkadot-dev
-// --wasm-execution=compiled
-// --pallet=pallet_scheduler
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
// --steps=50
// --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_scheduler
+// --chain=collectives-westend-dev
+// --header=./cumulus/file_header.txt
+// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
@@ -55,8 +54,8 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `31`
// Estimated: `1489`
- // Minimum execution time: 3_441_000 picoseconds.
- Weight::from_parts(3_604_000, 0)
+ // Minimum execution time: 2_475_000 picoseconds.
+ Weight::from_parts(2_644_000, 0)
.saturating_add(Weight::from_parts(0, 1489))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
@@ -68,11 +67,11 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `77 + s * (177 ±0)`
// Estimated: `159279`
- // Minimum execution time: 2_879_000 picoseconds.
- Weight::from_parts(2_963_000, 0)
+ // Minimum execution time: 2_898_000 picoseconds.
+ Weight::from_parts(1_532_342, 0)
.saturating_add(Weight::from_parts(0, 159279))
- // Standard Error: 3_764
- .saturating_add(Weight::from_parts(909_557, 0).saturating_mul(s.into()))
+ // Standard Error: 4_736
+ .saturating_add(Weight::from_parts(412_374, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
@@ -80,25 +79,27 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 5_172_000 picoseconds.
- Weight::from_parts(5_294_000, 0)
+ // Minimum execution time: 3_171_000 picoseconds.
+ Weight::from_parts(3_349_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
/// Storage: `Preimage::PreimageFor` (r:1 w:1)
/// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`)
- /// Storage: `Preimage::StatusFor` (r:1 w:1)
+ /// Storage: `Preimage::StatusFor` (r:1 w:0)
/// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`)
+ /// Storage: `Preimage::RequestStatusFor` (r:1 w:1)
+ /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`)
/// The range of component `s` is `[128, 4194304]`.
fn service_task_fetched(s: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `213 + s * (1 ±0)`
- // Estimated: `3678 + s * (1 ±0)`
- // Minimum execution time: 19_704_000 picoseconds.
- Weight::from_parts(19_903_000, 0)
- .saturating_add(Weight::from_parts(0, 3678))
- // Standard Error: 5
- .saturating_add(Weight::from_parts(1_394, 0).saturating_mul(s.into()))
- .saturating_add(T::DbWeight::get().reads(2))
+ // Measured: `246 + s * (1 ±0)`
+ // Estimated: `3711 + s * (1 ±0)`
+ // Minimum execution time: 17_329_000 picoseconds.
+ Weight::from_parts(17_604_000, 0)
+ .saturating_add(Weight::from_parts(0, 3711))
+ // Standard Error: 1
+ .saturating_add(Weight::from_parts(1_256, 0).saturating_mul(s.into()))
+ .saturating_add(T::DbWeight::get().reads(3))
.saturating_add(T::DbWeight::get().writes(2))
.saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into()))
}
@@ -108,8 +109,8 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 6_359_000 picoseconds.
- Weight::from_parts(6_599_000, 0)
+ // Minimum execution time: 4_503_000 picoseconds.
+ Weight::from_parts(4_677_000, 0)
.saturating_add(Weight::from_parts(0, 0))
.saturating_add(T::DbWeight::get().writes(1))
}
@@ -117,24 +118,24 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 5_217_000 picoseconds.
- Weight::from_parts(5_333_000, 0)
+ // Minimum execution time: 3_145_000 picoseconds.
+ Weight::from_parts(3_252_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
fn execute_dispatch_signed() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 2_406_000 picoseconds.
- Weight::from_parts(2_541_000, 0)
+ // Minimum execution time: 1_804_000 picoseconds.
+ Weight::from_parts(1_891_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
fn execute_dispatch_unsigned() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 2_370_000 picoseconds.
- Weight::from_parts(2_561_000, 0)
+ // Minimum execution time: 1_706_000 picoseconds.
+ Weight::from_parts(1_776_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
/// Storage: `Scheduler::Agenda` (r:1 w:1)
@@ -144,11 +145,11 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `77 + s * (177 ±0)`
// Estimated: `159279`
- // Minimum execution time: 11_784_000 picoseconds.
- Weight::from_parts(5_574_404, 0)
+ // Minimum execution time: 8_629_000 picoseconds.
+ Weight::from_parts(6_707_232, 0)
.saturating_add(Weight::from_parts(0, 159279))
- // Standard Error: 7_217
- .saturating_add(Weight::from_parts(1_035_248, 0).saturating_mul(s.into()))
+ // Standard Error: 5_580
+ .saturating_add(Weight::from_parts(471_827, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
@@ -161,11 +162,11 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `77 + s * (177 ±0)`
// Estimated: `159279`
- // Minimum execution time: 16_373_000 picoseconds.
- Weight::from_parts(3_088_135, 0)
+ // Minimum execution time: 12_675_000 picoseconds.
+ Weight::from_parts(7_791_682, 0)
.saturating_add(Weight::from_parts(0, 159279))
- // Standard Error: 7_095
- .saturating_add(Weight::from_parts(1_745_270, 0).saturating_mul(s.into()))
+ // Standard Error: 5_381
+ .saturating_add(Weight::from_parts(653_023, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(2))
}
@@ -178,11 +179,11 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `468 + s * (179 ±0)`
// Estimated: `159279`
- // Minimum execution time: 14_822_000 picoseconds.
- Weight::from_parts(9_591_402, 0)
+ // Minimum execution time: 11_908_000 picoseconds.
+ Weight::from_parts(11_833_059, 0)
.saturating_add(Weight::from_parts(0, 159279))
- // Standard Error: 7_151
- .saturating_add(Weight::from_parts(1_058_408, 0).saturating_mul(s.into()))
+ // Standard Error: 5_662
+ .saturating_add(Weight::from_parts(482_816, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().writes(2))
}
@@ -195,12 +196,91 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `509 + s * (179 ±0)`
// Estimated: `159279`
- // Minimum execution time: 18_541_000 picoseconds.
- Weight::from_parts(6_522_239, 0)
+ // Minimum execution time: 15_506_000 picoseconds.
+ Weight::from_parts(11_372_975, 0)
.saturating_add(Weight::from_parts(0, 159279))
- // Standard Error: 8_349
- .saturating_add(Weight::from_parts(1_760_431, 0).saturating_mul(s.into()))
+ // Standard Error: 5_765
+ .saturating_add(Weight::from_parts(656_322, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().writes(2))
}
+ /// Storage: `Scheduler::Retries` (r:1 w:2)
+ /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Lookup` (r:0 w:1)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+ /// The range of component `s` is `[1, 200]`.
+ fn schedule_retry(s: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `159`
+ // Estimated: `159279`
+ // Minimum execution time: 14_069_000 picoseconds.
+ Weight::from_parts(14_868_345, 0)
+ .saturating_add(Weight::from_parts(0, 159279))
+ // Standard Error: 425
+ .saturating_add(Weight::from_parts(33_468, 0).saturating_mul(s.into()))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(4))
+ }
+ /// Storage: `Scheduler::Agenda` (r:1 w:0)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Retries` (r:0 w:1)
+ /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+ fn set_retry() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `77 + s * (177 ±0)`
+ // Estimated: `159279`
+ // Minimum execution time: 7_550_000 picoseconds.
+ Weight::from_parts(6_735_955, 0)
+ .saturating_add(Weight::from_parts(0, 159279))
+ .saturating_add(T::DbWeight::get().reads(1))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: `Scheduler::Lookup` (r:1 w:0)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:0)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Retries` (r:0 w:1)
+ /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+ fn set_retry_named() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `513 + s * (179 ±0)`
+ // Estimated: `159279`
+ // Minimum execution time: 11_017_000 picoseconds.
+ Weight::from_parts(11_749_385, 0)
+ .saturating_add(Weight::from_parts(0, 159279))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: `Scheduler::Agenda` (r:1 w:0)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Retries` (r:0 w:1)
+ /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+ fn cancel_retry() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `77 + s * (177 ±0)`
+ // Estimated: `159279`
+ // Minimum execution time: 7_550_000 picoseconds.
+ Weight::from_parts(6_735_955, 0)
+ .saturating_add(Weight::from_parts(0, 159279))
+ .saturating_add(T::DbWeight::get().reads(1))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: `Scheduler::Lookup` (r:1 w:0)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:0)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Retries` (r:0 w:1)
+ /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+ fn cancel_retry_named() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `513 + s * (179 ±0)`
+ // Estimated: `159279`
+ // Minimum execution time: 11_017_000 picoseconds.
+ Weight::from_parts(11_749_385, 0)
+ .saturating_add(Weight::from_parts(0, 159279))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
}
diff --git a/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs b/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs
index e4732a2d17dc..0f36dbd384df 100644
--- a/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs
+++ b/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs
@@ -17,24 +17,25 @@
//! Autogenerated weights for `pallet_scheduler`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024
// Executed Command:
-// ./target/production/polkadot
+// target/production/polkadot
// benchmark
// pallet
-// --chain=rococo-dev
// --steps=50
// --repeat=20
-// --pallet=pallet_scheduler
// --extrinsic=*
-// --execution=wasm
// --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/rococo/src/weights/
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_scheduler
+// --chain=rococo-dev
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/rococo/src/weights/
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
@@ -47,30 +48,30 @@ use core::marker::PhantomData;
/// Weight functions for `pallet_scheduler`.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
- /// Storage: Scheduler IncompleteSince (r:1 w:1)
- /// Proof: Scheduler IncompleteSince (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+ /// Storage: `Scheduler::IncompleteSince` (r:1 w:1)
+ /// Proof: `Scheduler::IncompleteSince` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
fn service_agendas_base() -> Weight {
// Proof Size summary in bytes:
- // Measured: `69`
+ // Measured: `68`
// Estimated: `1489`
- // Minimum execution time: 4_741_000 picoseconds.
- Weight::from_parts(4_939_000, 0)
+ // Minimum execution time: 2_869_000 picoseconds.
+ Weight::from_parts(3_109_000, 0)
.saturating_add(Weight::from_parts(0, 1489))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
- /// Storage: Scheduler Agenda (r:1 w:1)
- /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
/// The range of component `s` is `[0, 50]`.
fn service_agenda_base(s: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `116 + s * (177 ±0)`
+ // Measured: `115 + s * (177 ±0)`
// Estimated: `42428`
- // Minimum execution time: 4_504_000 picoseconds.
- Weight::from_parts(7_569_333, 0)
+ // Minimum execution time: 3_326_000 picoseconds.
+ Weight::from_parts(5_818_563, 0)
.saturating_add(Weight::from_parts(0, 42428))
- // Standard Error: 1_818
- .saturating_add(Weight::from_parts(771_180, 0).saturating_mul(s.into()))
+ // Standard Error: 1_261
+ .saturating_add(Weight::from_parts(336_446, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
@@ -78,36 +79,38 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 5_709_000 picoseconds.
- Weight::from_parts(5_929_000, 0)
+ // Minimum execution time: 3_007_000 picoseconds.
+ Weight::from_parts(3_197_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
- /// Storage: Preimage PreimageFor (r:1 w:1)
- /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured)
- /// Storage: Preimage StatusFor (r:1 w:1)
- /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen)
+ /// Storage: `Preimage::PreimageFor` (r:1 w:1)
+ /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`)
+ /// Storage: `Preimage::StatusFor` (r:1 w:0)
+ /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`)
+ /// Storage: `Preimage::RequestStatusFor` (r:1 w:1)
+ /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`)
/// The range of component `s` is `[128, 4194304]`.
fn service_task_fetched(s: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `251 + s * (1 ±0)`
// Estimated: `3716 + s * (1 ±0)`
- // Minimum execution time: 20_710_000 picoseconds.
- Weight::from_parts(20_918_000, 0)
+ // Minimum execution time: 16_590_000 picoseconds.
+ Weight::from_parts(16_869_000, 0)
.saturating_add(Weight::from_parts(0, 3716))
// Standard Error: 9
- .saturating_add(Weight::from_parts(1_257, 0).saturating_mul(s.into()))
- .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(Weight::from_parts(1_308, 0).saturating_mul(s.into()))
+ .saturating_add(T::DbWeight::get().reads(3))
.saturating_add(T::DbWeight::get().writes(2))
.saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into()))
}
- /// Storage: Scheduler Lookup (r:0 w:1)
- /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen)
+ /// Storage: `Scheduler::Lookup` (r:0 w:1)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
fn service_task_named() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 7_262_000 picoseconds.
- Weight::from_parts(7_412_000, 0)
+ // Minimum execution time: 4_320_000 picoseconds.
+ Weight::from_parts(4_594_000, 0)
.saturating_add(Weight::from_parts(0, 0))
.saturating_add(T::DbWeight::get().writes(1))
}
@@ -115,90 +118,173 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 5_774_000 picoseconds.
- Weight::from_parts(5_887_000, 0)
+ // Minimum execution time: 2_956_000 picoseconds.
+ Weight::from_parts(3_216_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
fn execute_dispatch_signed() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 2_777_000 picoseconds.
- Weight::from_parts(2_865_000, 0)
+ // Minimum execution time: 1_824_000 picoseconds.
+ Weight::from_parts(1_929_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
fn execute_dispatch_unsigned() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 2_739_000 picoseconds.
- Weight::from_parts(2_827_000, 0)
+ // Minimum execution time: 1_749_000 picoseconds.
+ Weight::from_parts(1_916_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
- /// Storage: Scheduler Agenda (r:1 w:1)
- /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
/// The range of component `s` is `[0, 49]`.
fn schedule(s: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `116 + s * (177 ±0)`
+ // Measured: `115 + s * (177 ±0)`
// Estimated: `42428`
- // Minimum execution time: 14_788_000 picoseconds.
- Weight::from_parts(17_705_748, 0)
+ // Minimum execution time: 9_086_000 picoseconds.
+ Weight::from_parts(11_733_696, 0)
.saturating_add(Weight::from_parts(0, 42428))
- // Standard Error: 1_703
- .saturating_add(Weight::from_parts(760_991, 0).saturating_mul(s.into()))
+ // Standard Error: 1_362
+ .saturating_add(Weight::from_parts(375_266, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
- /// Storage: Scheduler Agenda (r:1 w:1)
- /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen)
- /// Storage: Scheduler Lookup (r:0 w:1)
- /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Lookup` (r:0 w:1)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
/// The range of component `s` is `[1, 50]`.
fn cancel(s: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `116 + s * (177 ±0)`
+ // Measured: `115 + s * (177 ±0)`
// Estimated: `42428`
- // Minimum execution time: 18_716_000 picoseconds.
- Weight::from_parts(18_220_022, 0)
+ // Minimum execution time: 12_716_000 picoseconds.
+ Weight::from_parts(12_529_180, 0)
.saturating_add(Weight::from_parts(0, 42428))
- // Standard Error: 1_508
- .saturating_add(Weight::from_parts(1_357_835, 0).saturating_mul(s.into()))
+ // Standard Error: 867
+ .saturating_add(Weight::from_parts(548_188, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(2))
}
- /// Storage: Scheduler Lookup (r:1 w:1)
- /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen)
- /// Storage: Scheduler Agenda (r:1 w:1)
- /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen)
+ /// Storage: `Scheduler::Lookup` (r:1 w:1)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
/// The range of component `s` is `[0, 49]`.
fn schedule_named(s: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `293 + s * (185 ±0)`
+ // Measured: `292 + s * (185 ±0)`
// Estimated: `42428`
- // Minimum execution time: 17_719_000 picoseconds.
- Weight::from_parts(21_657_806, 0)
+ // Minimum execution time: 12_053_000 picoseconds.
+ Weight::from_parts(15_358_056, 0)
.saturating_add(Weight::from_parts(0, 42428))
- // Standard Error: 2_645
- .saturating_add(Weight::from_parts(794_184, 0).saturating_mul(s.into()))
+ // Standard Error: 3_176
+ .saturating_add(Weight::from_parts(421_589, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().writes(2))
}
- /// Storage: Scheduler Lookup (r:1 w:1)
- /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen)
- /// Storage: Scheduler Agenda (r:1 w:1)
- /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen)
+ /// Storage: `Scheduler::Lookup` (r:1 w:1)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
/// The range of component `s` is `[1, 50]`.
fn cancel_named(s: u32, ) -> Weight {
// Proof Size summary in bytes:
- // Measured: `319 + s * (185 ±0)`
+ // Measured: `318 + s * (185 ±0)`
// Estimated: `42428`
- // Minimum execution time: 20_225_000 picoseconds.
- Weight::from_parts(20_494_405, 0)
+ // Minimum execution time: 14_803_000 picoseconds.
+ Weight::from_parts(15_805_714, 0)
.saturating_add(Weight::from_parts(0, 42428))
- // Standard Error: 1_890
- .saturating_add(Weight::from_parts(1_379_025, 0).saturating_mul(s.into()))
+ // Standard Error: 2_597
+ .saturating_add(Weight::from_parts(611_053, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().writes(2))
}
+ /// Storage: `Scheduler::Retries` (r:1 w:2)
+ /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Lookup` (r:0 w:1)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+ /// The range of component `s` is `[1, 50]`.
+ fn schedule_retry(s: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `196`
+ // Estimated: `42428`
+ // Minimum execution time: 13_156_000 picoseconds.
+ Weight::from_parts(13_801_287, 0)
+ .saturating_add(Weight::from_parts(0, 42428))
+ // Standard Error: 568
+ .saturating_add(Weight::from_parts(35_441, 0).saturating_mul(s.into()))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(4))
+ }
+ /// Storage: `Scheduler::Agenda` (r:1 w:0)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Retries` (r:0 w:1)
+ /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+ /// The range of component `s` is `[1, 50]`.
+ fn set_retry() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `115 + s * (177 ±0)`
+ // Estimated: `42428`
+ // Minimum execution time: 7_912_000 picoseconds.
+ Weight::from_parts(8_081_460, 0)
+ .saturating_add(Weight::from_parts(0, 42428))
+ .saturating_add(T::DbWeight::get().reads(1))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: `Scheduler::Lookup` (r:1 w:0)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:0)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Retries` (r:0 w:1)
+ /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+ /// The range of component `s` is `[1, 50]`.
+ fn set_retry_named() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `324 + s * (185 ±0)`
+ // Estimated: `42428`
+ // Minimum execution time: 10_673_000 picoseconds.
+ Weight::from_parts(12_212_185, 0)
+ .saturating_add(Weight::from_parts(0, 42428))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: `Scheduler::Agenda` (r:1 w:0)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Retries` (r:0 w:1)
+ /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+ /// The range of component `s` is `[1, 50]`.
+ fn cancel_retry() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `115 + s * (177 ±0)`
+ // Estimated: `42428`
+ // Minimum execution time: 7_912_000 picoseconds.
+ Weight::from_parts(8_081_460, 0)
+ .saturating_add(Weight::from_parts(0, 42428))
+ .saturating_add(T::DbWeight::get().reads(1))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: `Scheduler::Lookup` (r:1 w:0)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:0)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Retries` (r:0 w:1)
+ /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+ /// The range of component `s` is `[1, 50]`.
+ fn cancel_retry_named() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `324 + s * (185 ±0)`
+ // Estimated: `42428`
+ // Minimum execution time: 10_673_000 picoseconds.
+ Weight::from_parts(12_212_185, 0)
+ .saturating_add(Weight::from_parts(0, 42428))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
}
diff --git a/polkadot/runtime/westend/src/weights/pallet_scheduler.rs b/polkadot/runtime/westend/src/weights/pallet_scheduler.rs
index 7291b9809330..beef3796dea6 100644
--- a/polkadot/runtime/westend/src/weights/pallet_scheduler.rs
+++ b/polkadot/runtime/westend/src/weights/pallet_scheduler.rs
@@ -17,27 +17,25 @@
//! Autogenerated weights for `pallet_scheduler`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024
// Executed Command:
-// ./target/production/polkadot
+// target/production/polkadot
// benchmark
// pallet
-// --chain=westend-dev
// --steps=50
// --repeat=20
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --pallet=pallet_scheduler
// --extrinsic=*
-// --execution=wasm
// --wasm-execution=compiled
-// --header=./file_header.txt
-// --output=./runtime/westend/src/weights/
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_scheduler
+// --chain=westend-dev
+// --header=./polkadot/file_header.txt
+// --output=./polkadot/runtime/westend/src/weights/
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
@@ -50,30 +48,30 @@ use core::marker::PhantomData;
/// Weight functions for `pallet_scheduler`.
pub struct WeightInfo<T>(PhantomData<T>);
impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
- /// Storage: Scheduler IncompleteSince (r:1 w:1)
- /// Proof: Scheduler IncompleteSince (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+ /// Storage: `Scheduler::IncompleteSince` (r:1 w:1)
+ /// Proof: `Scheduler::IncompleteSince` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
fn service_agendas_base() -> Weight {
// Proof Size summary in bytes:
// Measured: `69`
// Estimated: `1489`
- // Minimum execution time: 3_991_000 picoseconds.
- Weight::from_parts(4_160_000, 0)
+ // Minimum execution time: 3_220_000 picoseconds.
+ Weight::from_parts(3_512_000, 0)
.saturating_add(Weight::from_parts(0, 1489))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
- /// Storage: Scheduler Agenda (r:1 w:1)
- /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
/// The range of component `s` is `[0, 50]`.
fn service_agenda_base(s: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `116 + s * (177 ±0)`
// Estimated: `42428`
- // Minimum execution time: 3_647_000 picoseconds.
- Weight::from_parts(6_608_270, 0)
+ // Minimum execution time: 3_565_000 picoseconds.
+ Weight::from_parts(6_102_216, 0)
.saturating_add(Weight::from_parts(0, 42428))
- // Standard Error: 2_516
- .saturating_add(Weight::from_parts(892_866, 0).saturating_mul(s.into()))
+ // Standard Error: 1_413
+ .saturating_add(Weight::from_parts(339_016, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
@@ -81,36 +79,38 @@ impl pallet_scheduler::WeightInfo for WeightInfo {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 5_552_000 picoseconds.
- Weight::from_parts(5_836_000, 0)
+ // Minimum execution time: 2_940_000 picoseconds.
+ Weight::from_parts(3_070_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
- /// Storage: Preimage PreimageFor (r:1 w:1)
- /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured)
- /// Storage: Preimage StatusFor (r:1 w:1)
- /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen)
+ /// Storage: `Preimage::PreimageFor` (r:1 w:1)
+ /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`)
+ /// Storage: `Preimage::StatusFor` (r:1 w:0)
+ /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`)
+ /// Storage: `Preimage::RequestStatusFor` (r:1 w:1)
+ /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`)
/// The range of component `s` is `[128, 4194304]`.
fn service_task_fetched(s: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `217 + s * (1 ±0)`
// Estimated: `3682 + s * (1 ±0)`
- // Minimum execution time: 20_583_000 picoseconds.
- Weight::from_parts(20_771_000, 0)
+ // Minimum execution time: 16_602_000 picoseconds.
+ Weight::from_parts(16_834_000, 0)
.saturating_add(Weight::from_parts(0, 3682))
- // Standard Error: 11
- .saturating_add(Weight::from_parts(2_250, 0).saturating_mul(s.into()))
- .saturating_add(T::DbWeight::get().reads(2))
+ // Standard Error: 10
+ .saturating_add(Weight::from_parts(1_307, 0).saturating_mul(s.into()))
+ .saturating_add(T::DbWeight::get().reads(3))
.saturating_add(T::DbWeight::get().writes(2))
.saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into()))
}
- /// Storage: Scheduler Lookup (r:0 w:1)
- /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen)
+ /// Storage: `Scheduler::Lookup` (r:0 w:1)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
fn service_task_named() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 7_271_000 picoseconds.
- Weight::from_parts(7_447_000, 0)
+ // Minimum execution time: 4_202_000 picoseconds.
+ Weight::from_parts(4_383_000, 0)
.saturating_add(Weight::from_parts(0, 0))
.saturating_add(T::DbWeight::get().writes(1))
}
@@ -118,90 +118,169 @@ impl pallet_scheduler::WeightInfo for WeightInfo {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 5_547_000 picoseconds.
- Weight::from_parts(5_776_000, 0)
+ // Minimum execution time: 2_917_000 picoseconds.
+ Weight::from_parts(3_043_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
fn execute_dispatch_signed() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 2_480_000 picoseconds.
- Weight::from_parts(2_628_000, 0)
+ // Minimum execution time: 1_707_000 picoseconds.
+ Weight::from_parts(1_802_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
fn execute_dispatch_unsigned() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
- // Minimum execution time: 2_479_000 picoseconds.
- Weight::from_parts(2_626_000, 0)
+ // Minimum execution time: 1_671_000 picoseconds.
+ Weight::from_parts(1_796_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
- /// Storage: Scheduler Agenda (r:1 w:1)
- /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
/// The range of component `s` is `[0, 49]`.
fn schedule(s: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `116 + s * (177 ±0)`
// Estimated: `42428`
- // Minimum execution time: 13_350_000 picoseconds.
- Weight::from_parts(15_289_847, 0)
+ // Minimum execution time: 9_313_000 picoseconds.
+ Weight::from_parts(12_146_613, 0)
.saturating_add(Weight::from_parts(0, 42428))
- // Standard Error: 5_375
- .saturating_add(Weight::from_parts(974_567, 0).saturating_mul(s.into()))
+ // Standard Error: 1_381
+ .saturating_add(Weight::from_parts(360_418, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
- /// Storage: Scheduler Agenda (r:1 w:1)
- /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen)
- /// Storage: Scheduler Lookup (r:0 w:1)
- /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Lookup` (r:0 w:1)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
/// The range of component `s` is `[1, 50]`.
fn cancel(s: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `116 + s * (177 ±0)`
// Estimated: `42428`
- // Minimum execution time: 17_646_000 picoseconds.
- Weight::from_parts(15_858_434, 0)
+ // Minimum execution time: 13_079_000 picoseconds.
+ Weight::from_parts(12_921_017, 0)
.saturating_add(Weight::from_parts(0, 42428))
- // Standard Error: 5_354
- .saturating_add(Weight::from_parts(1_697_642, 0).saturating_mul(s.into()))
+ // Standard Error: 1_112
+ .saturating_add(Weight::from_parts(538_089, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(2))
}
- /// Storage: Scheduler Lookup (r:1 w:1)
- /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen)
- /// Storage: Scheduler Agenda (r:1 w:1)
- /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen)
+ /// Storage: `Scheduler::Lookup` (r:1 w:1)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
/// The range of component `s` is `[0, 49]`.
fn schedule_named(s: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `293 + s * (185 ±0)`
// Estimated: `42428`
- // Minimum execution time: 16_419_000 picoseconds.
- Weight::from_parts(19_868_760, 0)
+ // Minimum execution time: 12_458_000 picoseconds.
+ Weight::from_parts(16_009_539, 0)
.saturating_add(Weight::from_parts(0, 42428))
- // Standard Error: 6_915
- .saturating_add(Weight::from_parts(1_010_225, 0).saturating_mul(s.into()))
+ // Standard Error: 2_260
+ .saturating_add(Weight::from_parts(399_245, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().writes(2))
}
- /// Storage: Scheduler Lookup (r:1 w:1)
- /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen)
- /// Storage: Scheduler Agenda (r:1 w:1)
- /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen)
+ /// Storage: `Scheduler::Lookup` (r:1 w:1)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
/// The range of component `s` is `[1, 50]`.
fn cancel_named(s: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `319 + s * (185 ±0)`
// Estimated: `42428`
- // Minimum execution time: 19_574_000 picoseconds.
- Weight::from_parts(18_453_197, 0)
+ // Minimum execution time: 15_173_000 picoseconds.
+ Weight::from_parts(15_602_728, 0)
.saturating_add(Weight::from_parts(0, 42428))
- // Standard Error: 6_009
- .saturating_add(Weight::from_parts(1_707_130, 0).saturating_mul(s.into()))
+ // Standard Error: 1_302
+ .saturating_add(Weight::from_parts(557_878, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().writes(2))
}
+ /// Storage: `Scheduler::Retries` (r:1 w:2)
+ /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:1)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Lookup` (r:0 w:1)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+ /// The range of component `s` is `[1, 50]`.
+ fn schedule_retry(s: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `197`
+ // Estimated: `42428`
+ // Minimum execution time: 13_531_000 picoseconds.
+ Weight::from_parts(13_985_249, 0)
+ .saturating_add(Weight::from_parts(0, 42428))
+ // Standard Error: 619
+ .saturating_add(Weight::from_parts(39_068, 0).saturating_mul(s.into()))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(4))
+ }
+ /// Storage: `Scheduler::Agenda` (r:1 w:0)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Retries` (r:0 w:1)
+ /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+ fn set_retry() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `116 + s * (177 ±0)`
+ // Estimated: `42428`
+ // Minimum execution time: 8_050_000 picoseconds.
+ Weight::from_parts(8_440_627, 0)
+ .saturating_add(Weight::from_parts(0, 42428))
+ .saturating_add(T::DbWeight::get().reads(1))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: `Scheduler::Lookup` (r:1 w:0)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:0)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Retries` (r:0 w:1)
+ /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+ fn set_retry_named() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `325 + s * (185 ±0)`
+ // Estimated: `42428`
+ // Minimum execution time: 10_876_000 picoseconds.
+ Weight::from_parts(11_708_172, 0)
+ .saturating_add(Weight::from_parts(0, 42428))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: `Scheduler::Agenda` (r:1 w:0)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Retries` (r:0 w:1)
+ /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+ fn cancel_retry() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `116 + s * (177 ±0)`
+ // Estimated: `42428`
+ // Minimum execution time: 8_050_000 picoseconds.
+ Weight::from_parts(8_440_627, 0)
+ .saturating_add(Weight::from_parts(0, 42428))
+ .saturating_add(T::DbWeight::get().reads(1))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: `Scheduler::Lookup` (r:1 w:0)
+ /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Agenda` (r:1 w:0)
+ /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`)
+ /// Storage: `Scheduler::Retries` (r:0 w:1)
+ /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+ fn cancel_retry_named() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `325 + s * (185 ±0)`
+ // Estimated: `42428`
+ // Minimum execution time: 10_876_000 picoseconds.
+ Weight::from_parts(11_708_172, 0)
+ .saturating_add(Weight::from_parts(0, 42428))
+ .saturating_add(T::DbWeight::get().reads(2))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
}
diff --git a/prdoc/pr_3060.prdoc b/prdoc/pr_3060.prdoc
new file mode 100644
index 000000000000..4cd6674ebb2e
--- /dev/null
+++ b/prdoc/pr_3060.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Add retry mechanics to `pallet-scheduler`
+
+doc:
+ - audience: Runtime Dev
+ description: |
+ This PR adds retry mechanics to pallet-scheduler, as described in the issue above.
+ Users can now set a retry configuration for a task so that, in case its scheduled run fails, it will be retried after a number of blocks, for a specified number of times or until it succeeds.
+ If a retried task runs successfully before running out of retries, its remaining retry counter will be reset to the initial value. If a retried task runs out of retries, it will be removed from the schedule.
+ Tasks which need to be scheduled for a retry are still subject to weight metering and agenda space, same as a regular task. Periodic tasks will have their periodic schedule put on hold while the task is retrying.
+
+crates:
+ - name: pallet-scheduler
diff --git a/substrate/frame/scheduler/src/benchmarking.rs b/substrate/frame/scheduler/src/benchmarking.rs
index cc86a1797378..18441d54b39a 100644
--- a/substrate/frame/scheduler/src/benchmarking.rs
+++ b/substrate/frame/scheduler/src/benchmarking.rs
@@ -22,12 +22,13 @@ use frame_benchmarking::v1::{account, benchmarks, BenchmarkError};
use frame_support::{
ensure,
traits::{schedule::Priority, BoundedInline},
+ weights::WeightMeter,
};
use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin};
use sp_std::{prelude::*, vec};
use crate::Pallet as Scheduler;
-use frame_system::Call as SystemCall;
+use frame_system::{Call as SystemCall, EventRecord};
const SEED: u32 = 0;
@@ -35,6 +36,14 @@ const BLOCK_NUMBER: u32 = 2;
type SystemOrigin<T> = <T as frame_system::Config>::RuntimeOrigin;
+fn assert_last_event<T: Config>(generic_event: <T as Config>::RuntimeEvent) {
+ let events = frame_system::Pallet::<T>::events();
+ let system_event: <T as Config>::RuntimeEvent = generic_event.into();
+ // compare to the last event record
+ let EventRecord { event, .. } = &events[events.len() - 1];
+ assert_eq!(event, &system_event);
+}
+
/// Add `n` items to the schedule.
///
/// For `resolved`:
@@ -306,5 +315,105 @@ benchmarks! {
);
}
+ schedule_retry {
+ let s in 1 .. T::MaxScheduledPerBlock::get();
+ let when = BLOCK_NUMBER.into();
+
+ fill_schedule::<T>(when, s)?;
+ let name = u32_to_name(s - 1);
+ let address = Lookup::<T>::get(name).unwrap();
+ let period: BlockNumberFor<T> = 1u32.into();
+ let root: <T as Config>::PalletsOrigin = frame_system::RawOrigin::Root.into();
+ let retry_config = RetryConfig { total_retries: 10, remaining: 10, period };
+ Retries::<T>::insert(address, retry_config);
+ let (mut when, index) = address;
+ let task = Agenda::<T>::get(when)[index as usize].clone().unwrap();
+ let mut weight_counter = WeightMeter::with_limit(T::MaximumWeight::get());
+ }: {
+ Scheduler::<T>::schedule_retry(&mut weight_counter, when, when, index, &task, retry_config);
+ } verify {
+ when = when + BlockNumberFor::<T>::one();
+ assert_eq!(
+ Retries::<T>::get((when, 0)),
+ Some(RetryConfig { total_retries: 10, remaining: 9, period })
+ );
+ }
+
+ set_retry {
+ let s = T::MaxScheduledPerBlock::get();
+ let when = BLOCK_NUMBER.into();
+
+ fill_schedule::<T>(when, s)?;
+ let name = u32_to_name(s - 1);
+ let address = Lookup::<T>::get(name).unwrap();
+ let (when, index) = address;
+ let period = BlockNumberFor::<T>::one();
+ }: _(RawOrigin::Root, (when, index), 10, period)
+ verify {
+ assert_eq!(
+ Retries::<T>::get((when, index)),
+ Some(RetryConfig { total_retries: 10, remaining: 10, period })
+ );
+ assert_last_event::<T>(
+ Event::RetrySet { task: address, id: None, period, retries: 10 }.into(),
+ );
+ }
+
+ set_retry_named {
+ let s = T::MaxScheduledPerBlock::get();
+ let when = BLOCK_NUMBER.into();
+
+ fill_schedule::<T>(when, s)?;
+ let name = u32_to_name(s - 1);
+ let address = Lookup::<T>::get(name).unwrap();
+ let (when, index) = address;
+ let period = BlockNumberFor::<T>::one();
+ }: _(RawOrigin::Root, name, 10, period)
+ verify {
+ assert_eq!(
+ Retries::<T>::get((when, index)),
+ Some(RetryConfig { total_retries: 10, remaining: 10, period })
+ );
+ assert_last_event::<T>(
+ Event::RetrySet { task: address, id: Some(name), period, retries: 10 }.into(),
+ );
+ }
+
+ cancel_retry {
+ let s = T::MaxScheduledPerBlock::get();
+ let when = BLOCK_NUMBER.into();
+
+ fill_schedule::<T>(when, s)?;
+ let name = u32_to_name(s - 1);
+ let address = Lookup::<T>::get(name).unwrap();
+ let (when, index) = address;
+ let period = BlockNumberFor::<T>::one();
+ assert!(Scheduler::<T>::set_retry(RawOrigin::Root.into(), (when, index), 10, period).is_ok());
+ }: _(RawOrigin::Root, (when, index))
+ verify {
+ assert!(!Retries::<T>::contains_key((when, index)));
+ assert_last_event::<T>(
+ Event::RetryCancelled { task: address, id: None }.into(),
+ );
+ }
+
+ cancel_retry_named {
+ let s = T::MaxScheduledPerBlock::get();
+ let when = BLOCK_NUMBER.into();
+
+ fill_schedule::<T>(when, s)?;
+ let name = u32_to_name(s - 1);
+ let address = Lookup::<T>::get(name).unwrap();
+ let (when, index) = address;
+ let period = BlockNumberFor::<T>::one();
+ assert!(Scheduler::<T>::set_retry_named(RawOrigin::Root.into(), name, 10, period).is_ok());
+ }: _(RawOrigin::Root, name)
+ verify {
+ assert!(!Retries::<T>::contains_key((when, index)));
+ assert_last_event::<T>(
+ Event::RetryCancelled { task: address, id: Some(name) }.into(),
+ );
+ }
+
impl_benchmark_test_suite!(Scheduler, crate::mock::new_test_ext(), crate::mock::Test);
}
diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs
index e94f154eee32..daebebdee995 100644
--- a/substrate/frame/scheduler/src/lib.rs
+++ b/substrate/frame/scheduler/src/lib.rs
@@ -122,6 +122,17 @@ pub type CallOrHashOf =
pub type BoundedCallOf =
Bounded<::RuntimeCall, ::Hashing>;
+/// The configuration of the retry mechanism for a given task along with its current state.
+#[derive(Clone, Copy, RuntimeDebug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)]
+pub struct RetryConfig<Period> {
+ /// Initial amount of retries allowed.
+ total_retries: u8,
+ /// Amount of retries left.
+ remaining: u8,
+ /// Period of time between retry attempts.
+ period: Period,
+}
+
#[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))]
#[derive(Clone, RuntimeDebug, Encode, Decode)]
struct ScheduledV1 {
@@ -148,6 +159,26 @@ pub struct Scheduled {
_phantom: PhantomData,
}
+impl<Name, Call, BlockNumber, PalletsOrigin, AccountId>
+ Scheduled<Name, Call, BlockNumber, PalletsOrigin, AccountId>
+where
+ Call: Clone,
+ PalletsOrigin: Clone,
+{
+ /// Create a new task to be used for retry attempts of the original one. The cloned task will
+ /// have the same `priority`, `call` and `origin`, but will always be non-periodic and unnamed.
+ pub fn as_retry(&self) -> Self {
+ Self {
+ maybe_id: None,
+ priority: self.priority,
+ call: self.call.clone(),
+ maybe_periodic: None,
+ origin: self.origin.clone(),
+ _phantom: Default::default(),
+ }
+ }
+}
+
use crate::{Scheduled as ScheduledV3, Scheduled as ScheduledV2};
pub type ScheduledV2Of<T> = ScheduledV2<
@@ -273,6 +304,16 @@ pub mod pallet {
ValueQuery,
>;
+ /// Retry configurations for items to be executed, indexed by task address.
+ #[pallet::storage]
+ pub type Retries<T: Config> = StorageMap<
+ _,
+ Blake2_128Concat,
+ TaskAddress<BlockNumberFor<T>>,
+ RetryConfig<BlockNumberFor<T>>,
+ OptionQuery,
+ >;
+
/// Lookup from a name to the block number and index of the task.
///
/// For v3 -> v4 the previously unbounded identities are Blake2-256 hashed to form the v4
@@ -295,10 +336,22 @@ pub mod pallet {
id: Option<TaskName>,
result: DispatchResult,
},
+ /// Set a retry configuration for some task.
+ RetrySet {
+ task: TaskAddress<BlockNumberFor<T>>,
+ id: Option<TaskName>,
+ period: BlockNumberFor<T>,
+ retries: u8,
+ },
+ /// Cancel a retry configuration for some task.
+ RetryCancelled { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
/// The call for the provided hash was not found so the task has been aborted.
CallUnavailable { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
/// The given task was unable to be renewed since the agenda is full at that block.
PeriodicFailed { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
+ /// The given task was unable to be retried since the agenda is full at that block or there
+ /// was not enough weight to reschedule it.
+ RetryFailed { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
/// The given task can never be executed since it is overweight.
PermanentlyOverweight { task: TaskAddress<BlockNumberFor<T>>, id: Option<TaskName> },
}
@@ -440,6 +493,111 @@ pub mod pallet {
)?;
Ok(())
}
+
+ /// Set a retry configuration for a task so that, in case its scheduled run fails, it will
+ /// be retried after `period` blocks, for a total amount of `retries` retries or until it
+ /// succeeds.
+ ///
+ /// Tasks which need to be scheduled for a retry are still subject to weight metering and
+ /// agenda space, same as a regular task. If a periodic task fails, it will be scheduled
+ /// normally while the task is retrying.
+ ///
+ /// Tasks scheduled as a result of a retry for a periodic task are unnamed, non-periodic
+ /// clones of the original task. Their retry configuration will be derived from the
+ /// original task's configuration, but will have a lower value for `remaining` than the
+ /// original `total_retries`.
+ #[pallet::call_index(6)]
+ #[pallet::weight(<T as Config>::WeightInfo::set_retry())]
+ pub fn set_retry(
+ origin: OriginFor<T>,
+ task: TaskAddress<BlockNumberFor<T>>,
+ retries: u8,
+ period: BlockNumberFor<T>,
+ ) -> DispatchResult {
+ T::ScheduleOrigin::ensure_origin(origin.clone())?;
+ let origin = <T as Config>::RuntimeOrigin::from(origin);
+ let (when, index) = task;
+ let agenda = Agenda::<T>::get(when);
+ let scheduled = agenda
+ .get(index as usize)
+ .and_then(Option::as_ref)
+ .ok_or(Error::<T>::NotFound)?;
+ Self::ensure_privilege(origin.caller(), &scheduled.origin)?;
+ Retries::<T>::insert(
+ (when, index),
+ RetryConfig { total_retries: retries, remaining: retries, period },
+ );
+ Self::deposit_event(Event::RetrySet { task, id: None, period, retries });
+ Ok(())
+ }
+
+ /// Set a retry configuration for a named task so that, in case its scheduled run fails, it
+ /// will be retried after `period` blocks, for a total amount of `retries` retries or until
+ /// it succeeds.
+ ///
+ /// Tasks which need to be scheduled for a retry are still subject to weight metering and
+ /// agenda space, same as a regular task. If a periodic task fails, it will be scheduled
+ /// normally while the task is retrying.
+ ///
+ /// Tasks scheduled as a result of a retry for a periodic task are unnamed, non-periodic
+ /// clones of the original task. Their retry configuration will be derived from the
+ /// original task's configuration, but will have a lower value for `remaining` than the
+ /// original `total_retries`.
+ #[pallet::call_index(7)]
+ #[pallet::weight(<T as Config>::WeightInfo::set_retry_named())]
+ pub fn set_retry_named(
+ origin: OriginFor<T>,
+ id: TaskName,
+ retries: u8,
+ period: BlockNumberFor<T>,
+ ) -> DispatchResult {
+ T::ScheduleOrigin::ensure_origin(origin.clone())?;
+ let origin = <T as Config>::RuntimeOrigin::from(origin);
+ let (when, agenda_index) = Lookup::<T>::get(&id).ok_or(Error::<T>::NotFound)?;
+ let agenda = Agenda::<T>::get(when);
+ let scheduled = agenda
+ .get(agenda_index as usize)
+ .and_then(Option::as_ref)
+ .ok_or(Error::<T>::NotFound)?;
+ Self::ensure_privilege(origin.caller(), &scheduled.origin)?;
+ Retries::<T>::insert(
+ (when, agenda_index),
+ RetryConfig { total_retries: retries, remaining: retries, period },
+ );
+ Self::deposit_event(Event::RetrySet {
+ task: (when, agenda_index),
+ id: Some(id),
+ period,
+ retries,
+ });
+ Ok(())
+ }
+
+ /// Removes the retry configuration of a task.
+ #[pallet::call_index(8)]
+ #[pallet::weight(<T as Config>::WeightInfo::cancel_retry())]
+ pub fn cancel_retry(
+ origin: OriginFor<T>,
+ task: TaskAddress<BlockNumberFor<T>>,
+ ) -> DispatchResult {
+ T::ScheduleOrigin::ensure_origin(origin.clone())?;
+ let origin = <T as Config>::RuntimeOrigin::from(origin);
+ Self::do_cancel_retry(origin.caller(), task)?;
+ Self::deposit_event(Event::RetryCancelled { task, id: None });
+ Ok(())
+ }
+
+ /// Cancel the retry configuration of a named task.
+ #[pallet::call_index(9)]
+ #[pallet::weight(<T as Config>::WeightInfo::cancel_retry_named())]
+ pub fn cancel_retry_named(origin: OriginFor<T>, id: TaskName) -> DispatchResult {
+ T::ScheduleOrigin::ensure_origin(origin.clone())?;
+ let origin = <T as Config>::RuntimeOrigin::from(origin);
+ let task = Lookup::<T>::get(&id).ok_or(Error::<T>::NotFound)?;
+ Self::do_cancel_retry(origin.caller(), task)?;
+ Self::deposit_event(Event::RetryCancelled { task, id: Some(id) });
+ Ok(())
+ }
}
}
@@ -838,12 +996,7 @@ impl Pallet {
Ok(None),
|s| -> Result