diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs index cf5610df6657..42e37b967e4c 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_scheduler` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_scheduler -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_scheduler +// --chain=collectives-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -55,8 +54,8 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `31` // Estimated: `1489` - // Minimum execution time: 3_441_000 picoseconds. 
- Weight::from_parts(3_604_000, 0) + // Minimum execution time: 2_475_000 picoseconds. + Weight::from_parts(2_644_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -68,11 +67,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `77 + s * (177 ±0)` // Estimated: `159279` - // Minimum execution time: 2_879_000 picoseconds. - Weight::from_parts(2_963_000, 0) + // Minimum execution time: 2_898_000 picoseconds. + Weight::from_parts(1_532_342, 0) .saturating_add(Weight::from_parts(0, 159279)) - // Standard Error: 3_764 - .saturating_add(Weight::from_parts(909_557, 0).saturating_mul(s.into())) + // Standard Error: 4_736 + .saturating_add(Weight::from_parts(412_374, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -80,25 +79,27 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_172_000 picoseconds. - Weight::from_parts(5_294_000, 0) + // Minimum execution time: 3_171_000 picoseconds. + Weight::from_parts(3_349_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `Preimage::PreimageFor` (r:1 w:1) /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Storage: `Preimage::StatusFor` (r:1 w:0) /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `s` is `[128, 4194304]`. fn service_task_fetched(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `213 + s * (1 ±0)` - // Estimated: `3678 + s * (1 ±0)` - // Minimum execution time: 19_704_000 picoseconds. - Weight::from_parts(19_903_000, 0) - .saturating_add(Weight::from_parts(0, 3678)) - // Standard Error: 5 - .saturating_add(Weight::from_parts(1_394, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) + // Measured: `246 + s * (1 ±0)` + // Estimated: `3711 + s * (1 ±0)` + // Minimum execution time: 17_329_000 picoseconds. + Weight::from_parts(17_604_000, 0) + .saturating_add(Weight::from_parts(0, 3711)) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_256, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) } @@ -108,8 +109,8 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_359_000 picoseconds. - Weight::from_parts(6_599_000, 0) + // Minimum execution time: 4_503_000 picoseconds. + Weight::from_parts(4_677_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -117,24 +118,24 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_217_000 picoseconds. - Weight::from_parts(5_333_000, 0) + // Minimum execution time: 3_145_000 picoseconds. 
+ Weight::from_parts(3_252_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute_dispatch_signed() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_406_000 picoseconds. - Weight::from_parts(2_541_000, 0) + // Minimum execution time: 1_804_000 picoseconds. + Weight::from_parts(1_891_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_370_000 picoseconds. - Weight::from_parts(2_561_000, 0) + // Minimum execution time: 1_706_000 picoseconds. + Weight::from_parts(1_776_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `Scheduler::Agenda` (r:1 w:1) @@ -144,11 +145,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `77 + s * (177 ±0)` // Estimated: `159279` - // Minimum execution time: 11_784_000 picoseconds. - Weight::from_parts(5_574_404, 0) + // Minimum execution time: 8_629_000 picoseconds. + Weight::from_parts(6_707_232, 0) .saturating_add(Weight::from_parts(0, 159279)) - // Standard Error: 7_217 - .saturating_add(Weight::from_parts(1_035_248, 0).saturating_mul(s.into())) + // Standard Error: 5_580 + .saturating_add(Weight::from_parts(471_827, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -161,11 +162,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `77 + s * (177 ±0)` // Estimated: `159279` - // Minimum execution time: 16_373_000 picoseconds. - Weight::from_parts(3_088_135, 0) + // Minimum execution time: 12_675_000 picoseconds. + Weight::from_parts(7_791_682, 0) .saturating_add(Weight::from_parts(0, 159279)) - // Standard Error: 7_095 - .saturating_add(Weight::from_parts(1_745_270, 0).saturating_mul(s.into())) + // Standard Error: 5_381 + .saturating_add(Weight::from_parts(653_023, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -178,11 +179,11 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `468 + s * (179 ±0)` // Estimated: `159279` - // Minimum execution time: 14_822_000 picoseconds. - Weight::from_parts(9_591_402, 0) + // Minimum execution time: 11_908_000 picoseconds. + Weight::from_parts(11_833_059, 0) .saturating_add(Weight::from_parts(0, 159279)) - // Standard Error: 7_151 - .saturating_add(Weight::from_parts(1_058_408, 0).saturating_mul(s.into())) + // Standard Error: 5_662 + .saturating_add(Weight::from_parts(482_816, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -195,12 +196,91 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `509 + s * (179 ±0)` // Estimated: `159279` - // Minimum execution time: 18_541_000 picoseconds. - Weight::from_parts(6_522_239, 0) + // Minimum execution time: 15_506_000 picoseconds. 
+ Weight::from_parts(11_372_975, 0) .saturating_add(Weight::from_parts(0, 159279)) - // Standard Error: 8_349 - .saturating_add(Weight::from_parts(1_760_431, 0).saturating_mul(s.into())) + // Standard Error: 5_765 + .saturating_add(Weight::from_parts(656_322, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Scheduler::Retries` (r:1 w:2) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 200]`. + fn schedule_retry(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `159` + // Estimated: `159279` + // Minimum execution time: 14_069_000 picoseconds. + Weight::from_parts(14_868_345, 0) + .saturating_add(Weight::from_parts(0, 159279)) + // Standard Error: 425 + .saturating_add(Weight::from_parts(33_468, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn set_retry() -> Weight { + // Proof Size summary in bytes: + // Measured: `77 + s * (177 ±0)` + // Estimated: `159279` + // Minimum execution time: 7_550_000 picoseconds. + Weight::from_parts(6_735_955, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:0) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn set_retry_named() -> Weight { + // Proof Size summary in bytes: + // Measured: `513 + s * (179 ±0)` + // Estimated: `159279` + // Minimum execution time: 11_017_000 picoseconds. + Weight::from_parts(11_749_385, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn cancel_retry() -> Weight { + // Proof Size summary in bytes: + // Measured: `77 + s * (177 ±0)` + // Estimated: `159279` + // Minimum execution time: 7_550_000 picoseconds. 
+ Weight::from_parts(6_735_955, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:0) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn cancel_retry_named() -> Weight { + // Proof Size summary in bytes: + // Measured: `513 + s * (179 ±0)` + // Estimated: `159279` + // Minimum execution time: 11_017_000 picoseconds. + Weight::from_parts(11_749_385, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs b/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs index e4732a2d17dc..0f36dbd384df 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_scheduler.rs @@ -17,24 +17,25 @@ //! Autogenerated weights for `pallet_scheduler` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=rococo-dev // --steps=50 // --repeat=20 -// --pallet=pallet_scheduler // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_scheduler +// --chain=rococo-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,30 +48,30 @@ use core::marker::PhantomData; /// Weight functions for `pallet_scheduler`. pub struct WeightInfo(PhantomData); impl pallet_scheduler::WeightInfo for WeightInfo { - /// Storage: Scheduler IncompleteSince (r:1 w:1) - /// Proof: Scheduler IncompleteSince (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Scheduler::IncompleteSince` (r:1 w:1) + /// Proof: `Scheduler::IncompleteSince` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn service_agendas_base() -> Weight { // Proof Size summary in bytes: - // Measured: `69` + // Measured: `68` // Estimated: `1489` - // Minimum execution time: 4_741_000 picoseconds. - Weight::from_parts(4_939_000, 0) + // Minimum execution time: 2_869_000 picoseconds. 
+ Weight::from_parts(3_109_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 50]`. fn service_agenda_base(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `116 + s * (177 ±0)` + // Measured: `115 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 4_504_000 picoseconds. - Weight::from_parts(7_569_333, 0) + // Minimum execution time: 3_326_000 picoseconds. + Weight::from_parts(5_818_563, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 1_818 - .saturating_add(Weight::from_parts(771_180, 0).saturating_mul(s.into())) + // Standard Error: 1_261 + .saturating_add(Weight::from_parts(336_446, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -78,36 +79,38 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_709_000 picoseconds. - Weight::from_parts(5_929_000, 0) + // Minimum execution time: 3_007_000 picoseconds. + Weight::from_parts(3_197_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Preimage PreimageFor (r:1 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `s` is `[128, 4194304]`. fn service_task_fetched(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `251 + s * (1 ±0)` // Estimated: `3716 + s * (1 ±0)` - // Minimum execution time: 20_710_000 picoseconds. - Weight::from_parts(20_918_000, 0) + // Minimum execution time: 16_590_000 picoseconds. 
+ Weight::from_parts(16_869_000, 0) .saturating_add(Weight::from_parts(0, 3716)) // Standard Error: 9 - .saturating_add(Weight::from_parts(1_257, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(Weight::from_parts(1_308, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) } - /// Storage: Scheduler Lookup (r:0 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn service_task_named() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_262_000 picoseconds. - Weight::from_parts(7_412_000, 0) + // Minimum execution time: 4_320_000 picoseconds. + Weight::from_parts(4_594_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -115,90 +118,173 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_774_000 picoseconds. - Weight::from_parts(5_887_000, 0) + // Minimum execution time: 2_956_000 picoseconds. + Weight::from_parts(3_216_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute_dispatch_signed() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_777_000 picoseconds. - Weight::from_parts(2_865_000, 0) + // Minimum execution time: 1_824_000 picoseconds. + Weight::from_parts(1_929_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_739_000 picoseconds. - Weight::from_parts(2_827_000, 0) + // Minimum execution time: 1_749_000 picoseconds. + Weight::from_parts(1_916_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 49]`. fn schedule(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `116 + s * (177 ±0)` + // Measured: `115 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 14_788_000 picoseconds. - Weight::from_parts(17_705_748, 0) + // Minimum execution time: 9_086_000 picoseconds. 
+ Weight::from_parts(11_733_696, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 1_703 - .saturating_add(Weight::from_parts(760_991, 0).saturating_mul(s.into())) + // Standard Error: 1_362 + .saturating_add(Weight::from_parts(375_266, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - /// Storage: Scheduler Lookup (r:0 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. fn cancel(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `116 + s * (177 ±0)` + // Measured: `115 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 18_716_000 picoseconds. - Weight::from_parts(18_220_022, 0) + // Minimum execution time: 12_716_000 picoseconds. + Weight::from_parts(12_529_180, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 1_508 - .saturating_add(Weight::from_parts(1_357_835, 0).saturating_mul(s.into())) + // Standard Error: 867 + .saturating_add(Weight::from_parts(548_188, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Scheduler Lookup (r:1 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 49]`. fn schedule_named(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `293 + s * (185 ±0)` + // Measured: `292 + s * (185 ±0)` // Estimated: `42428` - // Minimum execution time: 17_719_000 picoseconds. - Weight::from_parts(21_657_806, 0) + // Minimum execution time: 12_053_000 picoseconds. 
+ Weight::from_parts(15_358_056, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 2_645 - .saturating_add(Weight::from_parts(794_184, 0).saturating_mul(s.into())) + // Standard Error: 3_176 + .saturating_add(Weight::from_parts(421_589, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Scheduler Lookup (r:1 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. fn cancel_named(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `319 + s * (185 ±0)` + // Measured: `318 + s * (185 ±0)` // Estimated: `42428` - // Minimum execution time: 20_225_000 picoseconds. - Weight::from_parts(20_494_405, 0) + // Minimum execution time: 14_803_000 picoseconds. + Weight::from_parts(15_805_714, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 1_890 - .saturating_add(Weight::from_parts(1_379_025, 0).saturating_mul(s.into())) + // Standard Error: 2_597 + .saturating_add(Weight::from_parts(611_053, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Scheduler::Retries` (r:1 w:2) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 50]`. + fn schedule_retry(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `196` + // Estimated: `42428` + // Minimum execution time: 13_156_000 picoseconds. + Weight::from_parts(13_801_287, 0) + .saturating_add(Weight::from_parts(0, 42428)) + // Standard Error: 568 + .saturating_add(Weight::from_parts(35_441, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 50]`. + fn set_retry() -> Weight { + // Proof Size summary in bytes: + // Measured: `115 + s * (177 ±0)` + // Estimated: `42428` + // Minimum execution time: 7_912_000 picoseconds. 
+ Weight::from_parts(8_081_460, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:0) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 50]`. + fn set_retry_named() -> Weight { + // Proof Size summary in bytes: + // Measured: `324 + s * (185 ±0)` + // Estimated: `42428` + // Minimum execution time: 10_673_000 picoseconds. + Weight::from_parts(12_212_185, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 50]`. + fn cancel_retry() -> Weight { + // Proof Size summary in bytes: + // Measured: `115 + s * (177 ±0)` + // Estimated: `42428` + // Minimum execution time: 7_912_000 picoseconds. + Weight::from_parts(8_081_460, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:0) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 50]`. + fn cancel_retry_named() -> Weight { + // Proof Size summary in bytes: + // Measured: `324 + s * (185 ±0)` + // Estimated: `42428` + // Minimum execution time: 10_673_000 picoseconds. + Weight::from_parts(12_212_185, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/polkadot/runtime/westend/src/weights/pallet_scheduler.rs b/polkadot/runtime/westend/src/weights/pallet_scheduler.rs index 7291b9809330..beef3796dea6 100644 --- a/polkadot/runtime/westend/src/weights/pallet_scheduler.rs +++ b/polkadot/runtime/westend/src/weights/pallet_scheduler.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_scheduler` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 +//! 
HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=westend-dev // --steps=50 // --repeat=20 -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --pallet=pallet_scheduler // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/westend/src/weights/ +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_scheduler +// --chain=westend-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,30 +48,30 @@ use core::marker::PhantomData; /// Weight functions for `pallet_scheduler`. pub struct WeightInfo(PhantomData); impl pallet_scheduler::WeightInfo for WeightInfo { - /// Storage: Scheduler IncompleteSince (r:1 w:1) - /// Proof: Scheduler IncompleteSince (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Scheduler::IncompleteSince` (r:1 w:1) + /// Proof: `Scheduler::IncompleteSince` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn service_agendas_base() -> Weight { // Proof Size summary in bytes: // Measured: `69` // Estimated: `1489` - // Minimum execution time: 3_991_000 picoseconds. - Weight::from_parts(4_160_000, 0) + // Minimum execution time: 3_220_000 picoseconds. + Weight::from_parts(3_512_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 50]`. fn service_agenda_base(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `116 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 3_647_000 picoseconds. - Weight::from_parts(6_608_270, 0) + // Minimum execution time: 3_565_000 picoseconds. + Weight::from_parts(6_102_216, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 2_516 - .saturating_add(Weight::from_parts(892_866, 0).saturating_mul(s.into())) + // Standard Error: 1_413 + .saturating_add(Weight::from_parts(339_016, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -81,36 +79,38 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_552_000 picoseconds. - Weight::from_parts(5_836_000, 0) + // Minimum execution time: 2_940_000 picoseconds. 
+ Weight::from_parts(3_070_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Preimage PreimageFor (r:1 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `s` is `[128, 4194304]`. fn service_task_fetched(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `217 + s * (1 ±0)` // Estimated: `3682 + s * (1 ±0)` - // Minimum execution time: 20_583_000 picoseconds. - Weight::from_parts(20_771_000, 0) + // Minimum execution time: 16_602_000 picoseconds. + Weight::from_parts(16_834_000, 0) .saturating_add(Weight::from_parts(0, 3682)) - // Standard Error: 11 - .saturating_add(Weight::from_parts(2_250, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) + // Standard Error: 10 + .saturating_add(Weight::from_parts(1_307, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) } - /// Storage: Scheduler Lookup (r:0 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn service_task_named() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_271_000 picoseconds. - Weight::from_parts(7_447_000, 0) + // Minimum execution time: 4_202_000 picoseconds. + Weight::from_parts(4_383_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -118,90 +118,169 @@ impl pallet_scheduler::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_547_000 picoseconds. - Weight::from_parts(5_776_000, 0) + // Minimum execution time: 2_917_000 picoseconds. + Weight::from_parts(3_043_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute_dispatch_signed() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_480_000 picoseconds. - Weight::from_parts(2_628_000, 0) + // Minimum execution time: 1_707_000 picoseconds. + Weight::from_parts(1_802_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_479_000 picoseconds. - Weight::from_parts(2_626_000, 0) + // Minimum execution time: 1_671_000 picoseconds. 
+ Weight::from_parts(1_796_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 49]`. fn schedule(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `116 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 13_350_000 picoseconds. - Weight::from_parts(15_289_847, 0) + // Minimum execution time: 9_313_000 picoseconds. + Weight::from_parts(12_146_613, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 5_375 - .saturating_add(Weight::from_parts(974_567, 0).saturating_mul(s.into())) + // Standard Error: 1_381 + .saturating_add(Weight::from_parts(360_418, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) - /// Storage: Scheduler Lookup (r:0 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. fn cancel(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `116 + s * (177 ±0)` // Estimated: `42428` - // Minimum execution time: 17_646_000 picoseconds. - Weight::from_parts(15_858_434, 0) + // Minimum execution time: 13_079_000 picoseconds. + Weight::from_parts(12_921_017, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 5_354 - .saturating_add(Weight::from_parts(1_697_642, 0).saturating_mul(s.into())) + // Standard Error: 1_112 + .saturating_add(Weight::from_parts(538_089, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Scheduler Lookup (r:1 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 49]`. fn schedule_named(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `293 + s * (185 ±0)` // Estimated: `42428` - // Minimum execution time: 16_419_000 picoseconds. - Weight::from_parts(19_868_760, 0) + // Minimum execution time: 12_458_000 picoseconds. 
+ Weight::from_parts(16_009_539, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 6_915 - .saturating_add(Weight::from_parts(1_010_225, 0).saturating_mul(s.into())) + // Standard Error: 2_260 + .saturating_add(Weight::from_parts(399_245, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: Scheduler Lookup (r:1 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(38963), added: 41438, mode: MaxEncodedLen) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 50]`. fn cancel_named(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `319 + s * (185 ±0)` // Estimated: `42428` - // Minimum execution time: 19_574_000 picoseconds. - Weight::from_parts(18_453_197, 0) + // Minimum execution time: 15_173_000 picoseconds. + Weight::from_parts(15_602_728, 0) .saturating_add(Weight::from_parts(0, 42428)) - // Standard Error: 6_009 - .saturating_add(Weight::from_parts(1_707_130, 0).saturating_mul(s.into())) + // Standard Error: 1_302 + .saturating_add(Weight::from_parts(557_878, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: `Scheduler::Retries` (r:1 w:2) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 50]`. + fn schedule_retry(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `197` + // Estimated: `42428` + // Minimum execution time: 13_531_000 picoseconds. + Weight::from_parts(13_985_249, 0) + .saturating_add(Weight::from_parts(0, 42428)) + // Standard Error: 619 + .saturating_add(Weight::from_parts(39_068, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn set_retry() -> Weight { + // Proof Size summary in bytes: + // Measured: `116 + s * (177 ±0)` + // Estimated: `42428` + // Minimum execution time: 8_050_000 picoseconds. 
+ Weight::from_parts(8_440_627, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:0) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn set_retry_named() -> Weight { + // Proof Size summary in bytes: + // Measured: `325 + s * (185 ±0)` + // Estimated: `42428` + // Minimum execution time: 10_876_000 picoseconds. + Weight::from_parts(11_708_172, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn cancel_retry() -> Weight { + // Proof Size summary in bytes: + // Measured: `116 + s * (177 ±0)` + // Estimated: `42428` + // Minimum execution time: 8_050_000 picoseconds. + Weight::from_parts(8_440_627, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:0) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:0) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(38963), added: 41438, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Retries` (r:0 w:1) + /// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`) + fn cancel_retry_named() -> Weight { + // Proof Size summary in bytes: + // Measured: `325 + s * (185 ±0)` + // Estimated: `42428` + // Minimum execution time: 10_876_000 picoseconds. + Weight::from_parts(11_708_172, 0) + .saturating_add(Weight::from_parts(0, 42428)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/prdoc/pr_3060.prdoc b/prdoc/pr_3060.prdoc new file mode 100644 index 000000000000..4cd6674ebb2e --- /dev/null +++ b/prdoc/pr_3060.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Add retry mechanics to `pallet-scheduler` + +doc: + - audience: Runtime Dev + description: | + This PR adds retry mechanics to pallet-scheduler, as described in the issue above. + Users can now set a retry configuration for a task so that, in case its scheduled run fails, it will be retried after a number of blocks, for a specified number of times or until it succeeds. + If a retried task runs successfully before running out of retries, its remaining retry counter will be reset to the initial value. If a retried task runs out of retries, it will be removed from the schedule. 
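+ For example, with an illustrative configuration of `retries = 3` and `period = 10`, a task that fails on its scheduled block is re-dispatched 10 blocks later, up to 3 more times, and any successful run resets its remaining retry count back to 3.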
+ Tasks which need to be scheduled for a retry are still subject to weight metering and agenda space, same as a regular task. Periodic tasks will have their periodic schedule put on hold while the task is retrying. + +crates: + - name: pallet-scheduler diff --git a/substrate/frame/scheduler/src/benchmarking.rs b/substrate/frame/scheduler/src/benchmarking.rs index cc86a1797378..18441d54b39a 100644 --- a/substrate/frame/scheduler/src/benchmarking.rs +++ b/substrate/frame/scheduler/src/benchmarking.rs @@ -22,12 +22,13 @@ use frame_benchmarking::v1::{account, benchmarks, BenchmarkError}; use frame_support::{ ensure, traits::{schedule::Priority, BoundedInline}, + weights::WeightMeter, }; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use sp_std::{prelude::*, vec}; use crate::Pallet as Scheduler; -use frame_system::Call as SystemCall; +use frame_system::{Call as SystemCall, EventRecord}; const SEED: u32 = 0; @@ -35,6 +36,14 @@ const BLOCK_NUMBER: u32 = 2; type SystemOrigin = ::RuntimeOrigin; +fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + // compare to the last event record + let EventRecord { event, .. } = &events[events.len() - 1]; + assert_eq!(event, &system_event); +} + /// Add `n` items to the schedule. /// /// For `resolved`: @@ -306,5 +315,105 @@ benchmarks! { ); } + schedule_retry { + let s in 1 .. T::MaxScheduledPerBlock::get(); + let when = BLOCK_NUMBER.into(); + + fill_schedule::(when, s)?; + let name = u32_to_name(s - 1); + let address = Lookup::::get(name).unwrap(); + let period: BlockNumberFor = 1u32.into(); + let root: ::PalletsOrigin = frame_system::RawOrigin::Root.into(); + let retry_config = RetryConfig { total_retries: 10, remaining: 10, period }; + Retries::::insert(address, retry_config); + let (mut when, index) = address; + let task = Agenda::::get(when)[index as usize].clone().unwrap(); + let mut weight_counter = WeightMeter::with_limit(T::MaximumWeight::get()); + }: { + Scheduler::::schedule_retry(&mut weight_counter, when, when, index, &task, retry_config); + } verify { + when = when + BlockNumberFor::::one(); + assert_eq!( + Retries::::get((when, 0)), + Some(RetryConfig { total_retries: 10, remaining: 9, period }) + ); + } + + set_retry { + let s = T::MaxScheduledPerBlock::get(); + let when = BLOCK_NUMBER.into(); + + fill_schedule::(when, s)?; + let name = u32_to_name(s - 1); + let address = Lookup::::get(name).unwrap(); + let (when, index) = address; + let period = BlockNumberFor::::one(); + }: _(RawOrigin::Root, (when, index), 10, period) + verify { + assert_eq!( + Retries::::get((when, index)), + Some(RetryConfig { total_retries: 10, remaining: 10, period }) + ); + assert_last_event::( + Event::RetrySet { task: address, id: None, period, retries: 10 }.into(), + ); + } + + set_retry_named { + let s = T::MaxScheduledPerBlock::get(); + let when = BLOCK_NUMBER.into(); + + fill_schedule::(when, s)?; + let name = u32_to_name(s - 1); + let address = Lookup::::get(name).unwrap(); + let (when, index) = address; + let period = BlockNumberFor::::one(); + }: _(RawOrigin::Root, name, 10, period) + verify { + assert_eq!( + Retries::::get((when, index)), + Some(RetryConfig { total_retries: 10, remaining: 10, period }) + ); + assert_last_event::( + Event::RetrySet { task: address, id: Some(name), period, retries: 10 }.into(), + ); + } + + cancel_retry { + let s = T::MaxScheduledPerBlock::get(); + let when = BLOCK_NUMBER.into(); + + 
fill_schedule::(when, s)?; + let name = u32_to_name(s - 1); + let address = Lookup::::get(name).unwrap(); + let (when, index) = address; + let period = BlockNumberFor::::one(); + assert!(Scheduler::::set_retry(RawOrigin::Root.into(), (when, index), 10, period).is_ok()); + }: _(RawOrigin::Root, (when, index)) + verify { + assert!(!Retries::::contains_key((when, index))); + assert_last_event::( + Event::RetryCancelled { task: address, id: None }.into(), + ); + } + + cancel_retry_named { + let s = T::MaxScheduledPerBlock::get(); + let when = BLOCK_NUMBER.into(); + + fill_schedule::(when, s)?; + let name = u32_to_name(s - 1); + let address = Lookup::::get(name).unwrap(); + let (when, index) = address; + let period = BlockNumberFor::::one(); + assert!(Scheduler::::set_retry_named(RawOrigin::Root.into(), name, 10, period).is_ok()); + }: _(RawOrigin::Root, name) + verify { + assert!(!Retries::::contains_key((when, index))); + assert_last_event::( + Event::RetryCancelled { task: address, id: Some(name) }.into(), + ); + } + impl_benchmark_test_suite!(Scheduler, crate::mock::new_test_ext(), crate::mock::Test); } diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index e94f154eee32..daebebdee995 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -122,6 +122,17 @@ pub type CallOrHashOf = pub type BoundedCallOf = Bounded<::RuntimeCall, ::Hashing>; +/// The configuration of the retry mechanism for a given task along with its current state. +#[derive(Clone, Copy, RuntimeDebug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct RetryConfig { + /// Initial amount of retries allowed. + total_retries: u8, + /// Amount of retries left. + remaining: u8, + /// Period of time between retry attempts. + period: Period, +} + #[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))] #[derive(Clone, RuntimeDebug, Encode, Decode)] struct ScheduledV1 { @@ -148,6 +159,26 @@ pub struct Scheduled { _phantom: PhantomData, } +impl + Scheduled +where + Call: Clone, + PalletsOrigin: Clone, +{ + /// Create a new task to be used for retry attempts of the original one. The cloned task will + /// have the same `priority`, `call` and `origin`, but will always be non-periodic and unnamed. + pub fn as_retry(&self) -> Self { + Self { + maybe_id: None, + priority: self.priority, + call: self.call.clone(), + maybe_periodic: None, + origin: self.origin.clone(), + _phantom: Default::default(), + } + } +} + use crate::{Scheduled as ScheduledV3, Scheduled as ScheduledV2}; pub type ScheduledV2Of = ScheduledV2< @@ -273,6 +304,16 @@ pub mod pallet { ValueQuery, >; + /// Retry configurations for items to be executed, indexed by task address. + #[pallet::storage] + pub type Retries = StorageMap< + _, + Blake2_128Concat, + TaskAddress>, + RetryConfig>, + OptionQuery, + >; + /// Lookup from a name to the block number and index of the task. /// /// For v3 -> v4 the previously unbounded identities are Blake2-256 hashed to form the v4 @@ -295,10 +336,22 @@ pub mod pallet { id: Option, result: DispatchResult, }, + /// Set a retry configuration for some task. + RetrySet { + task: TaskAddress>, + id: Option, + period: BlockNumberFor, + retries: u8, + }, + /// Cancel a retry configuration for some task. + RetryCancelled { task: TaskAddress>, id: Option }, /// The call for the provided hash was not found so the task has been aborted. 
CallUnavailable { task: TaskAddress>, id: Option }, /// The given task was unable to be renewed since the agenda is full at that block. PeriodicFailed { task: TaskAddress>, id: Option }, + /// The given task was unable to be retried since the agenda is full at that block or there + /// was not enough weight to reschedule it. + RetryFailed { task: TaskAddress>, id: Option }, /// The given task can never be executed since it is overweight. PermanentlyOverweight { task: TaskAddress>, id: Option }, } @@ -440,6 +493,111 @@ pub mod pallet { )?; Ok(()) } + + /// Set a retry configuration for a task so that, in case its scheduled run fails, it will + /// be retried after `period` blocks, for a total amount of `retries` retries or until it + /// succeeds. + /// + /// Tasks which need to be scheduled for a retry are still subject to weight metering and + /// agenda space, same as a regular task. If a periodic task fails, it will be scheduled + /// normally while the task is retrying. + /// + /// Tasks scheduled as a result of a retry for a periodic task are unnamed, non-periodic + /// clones of the original task. Their retry configuration will be derived from the + /// original task's configuration, but will have a lower value for `remaining` than the + /// original `total_retries`. + #[pallet::call_index(6)] + #[pallet::weight(::WeightInfo::set_retry())] + pub fn set_retry( + origin: OriginFor, + task: TaskAddress>, + retries: u8, + period: BlockNumberFor, + ) -> DispatchResult { + T::ScheduleOrigin::ensure_origin(origin.clone())?; + let origin = ::RuntimeOrigin::from(origin); + let (when, index) = task; + let agenda = Agenda::::get(when); + let scheduled = agenda + .get(index as usize) + .and_then(Option::as_ref) + .ok_or(Error::::NotFound)?; + Self::ensure_privilege(origin.caller(), &scheduled.origin)?; + Retries::::insert( + (when, index), + RetryConfig { total_retries: retries, remaining: retries, period }, + ); + Self::deposit_event(Event::RetrySet { task, id: None, period, retries }); + Ok(()) + } + + /// Set a retry configuration for a named task so that, in case its scheduled run fails, it + /// will be retried after `period` blocks, for a total amount of `retries` retries or until + /// it succeeds. + /// + /// Tasks which need to be scheduled for a retry are still subject to weight metering and + /// agenda space, same as a regular task. If a periodic task fails, it will be scheduled + /// normally while the task is retrying. + /// + /// Tasks scheduled as a result of a retry for a periodic task are unnamed, non-periodic + /// clones of the original task. Their retry configuration will be derived from the + /// original task's configuration, but will have a lower value for `remaining` than the + /// original `total_retries`. 
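+ ///
+ /// Illustrative usage sketch (the task name and the concrete numbers are assumed, not part
+ /// of the original docs):
+ /// ```ignore
+ /// // Retry the task registered under `task_name` every 10 blocks, at most 3 times,
+ /// // whenever one of its scheduled runs fails. Requires `T::ScheduleOrigin` (root here).
+ /// assert!(Scheduler::<T>::set_retry_named(
+ ///     frame_system::RawOrigin::Root.into(),
+ ///     task_name,
+ ///     3,
+ ///     10u32.into(),
+ /// ).is_ok());
+ /// ```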
+ #[pallet::call_index(7)] + #[pallet::weight(::WeightInfo::set_retry_named())] + pub fn set_retry_named( + origin: OriginFor, + id: TaskName, + retries: u8, + period: BlockNumberFor, + ) -> DispatchResult { + T::ScheduleOrigin::ensure_origin(origin.clone())?; + let origin = ::RuntimeOrigin::from(origin); + let (when, agenda_index) = Lookup::::get(&id).ok_or(Error::::NotFound)?; + let agenda = Agenda::::get(when); + let scheduled = agenda + .get(agenda_index as usize) + .and_then(Option::as_ref) + .ok_or(Error::::NotFound)?; + Self::ensure_privilege(origin.caller(), &scheduled.origin)?; + Retries::::insert( + (when, agenda_index), + RetryConfig { total_retries: retries, remaining: retries, period }, + ); + Self::deposit_event(Event::RetrySet { + task: (when, agenda_index), + id: Some(id), + period, + retries, + }); + Ok(()) + } + + /// Removes the retry configuration of a task. + #[pallet::call_index(8)] + #[pallet::weight(::WeightInfo::cancel_retry())] + pub fn cancel_retry( + origin: OriginFor, + task: TaskAddress>, + ) -> DispatchResult { + T::ScheduleOrigin::ensure_origin(origin.clone())?; + let origin = ::RuntimeOrigin::from(origin); + Self::do_cancel_retry(origin.caller(), task)?; + Self::deposit_event(Event::RetryCancelled { task, id: None }); + Ok(()) + } + + /// Cancel the retry configuration of a named task. + #[pallet::call_index(9)] + #[pallet::weight(::WeightInfo::cancel_retry_named())] + pub fn cancel_retry_named(origin: OriginFor, id: TaskName) -> DispatchResult { + T::ScheduleOrigin::ensure_origin(origin.clone())?; + let origin = ::RuntimeOrigin::from(origin); + let task = Lookup::::get(&id).ok_or(Error::::NotFound)?; + Self::do_cancel_retry(origin.caller(), task)?; + Self::deposit_event(Event::RetryCancelled { task, id: Some(id) }); + Ok(()) + } } } @@ -838,12 +996,7 @@ impl Pallet { Ok(None), |s| -> Result>, DispatchError> { if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) { - if matches!( - T::OriginPrivilegeCmp::cmp_privilege(o, &s.origin), - Some(Ordering::Less) | None - ) { - return Err(BadOrigin.into()) - } + Self::ensure_privilege(o, &s.origin)?; }; Ok(s.take()) }, @@ -854,6 +1007,7 @@ impl Pallet { if let Some(id) = s.maybe_id { Lookup::::remove(id); } + Retries::::remove((when, index)); Self::cleanup_agenda(when); Self::deposit_event(Event::Canceled { when, index }); Ok(()) @@ -931,12 +1085,8 @@ impl Pallet { Agenda::::try_mutate(when, |agenda| -> DispatchResult { if let Some(s) = agenda.get_mut(i) { if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) { - if matches!( - T::OriginPrivilegeCmp::cmp_privilege(o, &s.origin), - Some(Ordering::Less) | None - ) { - return Err(BadOrigin.into()) - } + Self::ensure_privilege(o, &s.origin)?; + Retries::::remove((when, index)); T::Preimages::drop(&s.call); } *s = None; @@ -973,6 +1123,20 @@ impl Pallet { Self::deposit_event(Event::Canceled { when, index }); Self::place_task(new_time, task).map_err(|x| x.0) } + + fn do_cancel_retry( + origin: &T::PalletsOrigin, + (when, index): TaskAddress>, + ) -> Result<(), DispatchError> { + let agenda = Agenda::::get(when); + let scheduled = agenda + .get(index as usize) + .and_then(Option::as_ref) + .ok_or(Error::::NotFound)?; + Self::ensure_privilege(origin, &scheduled.origin)?; + Retries::::remove((when, index)); + Ok(()) + } } enum ServiceTaskError { @@ -1124,11 +1288,21 @@ impl Pallet { }, Err(()) => Err((Overweight, Some(task))), Ok(result) => { + let failed = result.is_err(); + let maybe_retry_config = Retries::::take((when, agenda_index)); 
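// A sketch exercising the named variants added above (`set_retry_named` and
// `cancel_retry_named`), again assuming the `Test` mock runtime: cancelling a retry
// configuration removes only the configuration, never the task itself.
new_test_ext().execute_with(|| {
	assert_ok!(Scheduler::do_schedule_named(
		[42u8; 32],
		DispatchTime::At(4),
		None,
		127,
		root(),
		Preimage::bound(RuntimeCall::Logger(LoggerCall::log {
			i: 42,
			weight: Weight::from_parts(10, 0)
		}))
		.unwrap()
	));
	assert_ok!(Scheduler::set_retry_named(root().into(), [42u8; 32], 3, 1));
	let address = Lookup::<Test>::get([42u8; 32]).unwrap();
	assert!(Retries::<Test>::contains_key(address));
	assert_ok!(Scheduler::cancel_retry_named(root().into(), [42u8; 32]));
	assert!(!Retries::<Test>::contains_key(address));
	// The original task is still queued in the agenda.
	assert!(Agenda::<Test>::get(4)[0].is_some());
});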
Self::deposit_event(Event::Dispatched { task: (when, agenda_index), id: task.maybe_id, result, }); + + match maybe_retry_config { + Some(retry_config) if failed => { + Self::schedule_retry(weight, now, when, agenda_index, &task, retry_config); + }, + _ => {}, + } + if let &Some((period, count)) = &task.maybe_periodic { if count > 1 { task.maybe_periodic = Some((period, count - 1)); @@ -1137,7 +1311,10 @@ impl Pallet { } let wake = now.saturating_add(period); match Self::place_task(wake, task) { - Ok(_) => {}, + Ok(new_address) => + if let Some(retry_config) = maybe_retry_config { + Retries::::insert(new_address, retry_config); + }, Err((_, task)) => { // TODO: Leave task in storage somewhere for it to be rescheduled // manually. @@ -1192,6 +1369,70 @@ impl Pallet { let _ = weight.try_consume(call_weight); Ok(result) } + + /// Check if a task has a retry configuration in place and, if so, try to reschedule it. + /// + /// Possible causes for failure to schedule a retry for a task: + /// - there wasn't enough weight to run the task reschedule logic + /// - there was no retry configuration in place + /// - there were no more retry attempts left + /// - the agenda was full. + fn schedule_retry( + weight: &mut WeightMeter, + now: BlockNumberFor, + when: BlockNumberFor, + agenda_index: u32, + task: &ScheduledOf, + retry_config: RetryConfig>, + ) { + if weight + .try_consume(T::WeightInfo::schedule_retry(T::MaxScheduledPerBlock::get())) + .is_err() + { + Self::deposit_event(Event::RetryFailed { + task: (when, agenda_index), + id: task.maybe_id, + }); + return; + } + + let RetryConfig { total_retries, mut remaining, period } = retry_config; + remaining = match remaining.checked_sub(1) { + Some(n) => n, + None => return, + }; + let wake = now.saturating_add(period); + match Self::place_task(wake, task.as_retry()) { + Ok(address) => { + // Reinsert the retry config to the new address of the task after it was + // placed. + Retries::::insert(address, RetryConfig { total_retries, remaining, period }); + }, + Err((_, task)) => { + // TODO: Leave task in storage somewhere for it to be + // rescheduled manually. + T::Preimages::drop(&task.call); + Self::deposit_event(Event::RetryFailed { + task: (when, agenda_index), + id: task.maybe_id, + }); + }, + } + } + + /// Ensure that `left` has at least the same level of privilege or higher than `right`. + /// + /// Returns an error if `left` has a lower level of privilege or the two cannot be compared. + fn ensure_privilege( + left: &::PalletsOrigin, + right: &::PalletsOrigin, + ) -> Result<(), DispatchError> { + if matches!(T::OriginPrivilegeCmp::cmp_privilege(left, right), Some(Ordering::Less) | None) + { + return Err(BadOrigin.into()); + } + Ok(()) + } } impl schedule::v2::Anon, ::RuntimeCall, T::PalletsOrigin> diff --git a/substrate/frame/scheduler/src/mock.rs b/substrate/frame/scheduler/src/mock.rs index d22b9fcf8d99..dbdc430a8331 100644 --- a/substrate/frame/scheduler/src/mock.rs +++ b/substrate/frame/scheduler/src/mock.rs @@ -51,6 +51,17 @@ pub mod logger { #[pallet::pallet] pub struct Pallet(_); + #[pallet::storage] + pub type Threshold = StorageValue<_, (BlockNumberFor, BlockNumberFor)>; + + #[pallet::error] + pub enum Error { + /// Under the threshold. + TooEarly, + /// Over the threshold. 
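// A sketch of the `schedule_retry` bookkeeping described above, assuming the `Test` mock
// runtime and the `timed_log` helper from `mock.rs`: each failed run consumes one attempt
// and re-queues an unnamed clone `period` blocks later; once no attempts remain, the task
// is simply dropped.
new_test_ext().execute_with(|| {
	// Force `timed_log` to fail until block 100.
	Threshold::<Test>::put((100, 200));
	assert_ok!(Scheduler::do_schedule(
		DispatchTime::At(4),
		None,
		127,
		root(),
		Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log {
			i: 7,
			weight: Weight::from_parts(10, 0)
		}))
		.unwrap()
	));
	// A single retry attempt, 2 blocks after a failure.
	assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 1, 2));
	run_to_block(4);
	// The run failed, the only attempt was consumed and the clone now sits at block 6.
	assert_eq!(Retries::<Test>::get((6, 0)).unwrap().remaining, 0);
	run_to_block(6);
	// The last attempt failed too, so both the task and its retry entry are gone.
	assert_eq!(Retries::<Test>::iter().count(), 0);
	assert_eq!(Agenda::<Test>::iter().count(), 0);
});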
+ TooLate, + } + #[pallet::hooks] impl Hooks> for Pallet {} @@ -89,6 +100,20 @@ pub mod logger { }); Ok(()) } + + #[pallet::call_index(2)] + #[pallet::weight(*weight)] + pub fn timed_log(origin: OriginFor, i: u32, weight: Weight) -> DispatchResult { + let now = frame_system::Pallet::::block_number(); + let (start, end) = Threshold::::get().unwrap_or((0u32.into(), u32::MAX.into())); + ensure!(now >= start, Error::::TooEarly); + ensure!(now <= end, Error::::TooLate); + Self::deposit_event(Event::Logged(i, weight)); + Log::mutate(|log| { + log.push((origin.caller().clone(), i)); + }); + Ok(()) + } } } @@ -198,6 +223,21 @@ impl WeightInfo for TestWeightInfo { fn cancel_named(_s: u32) -> Weight { Weight::from_parts(50, 0) } + fn schedule_retry(_s: u32) -> Weight { + Weight::from_parts(100000, 0) + } + fn set_retry() -> Weight { + Weight::from_parts(50, 0) + } + fn set_retry_named() -> Weight { + Weight::from_parts(50, 0) + } + fn cancel_retry() -> Weight { + Weight::from_parts(50, 0) + } + fn cancel_retry_named() -> Weight { + Weight::from_parts(50, 0) + } } parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * diff --git a/substrate/frame/scheduler/src/tests.rs b/substrate/frame/scheduler/src/tests.rs index 1bf8b3e5f3a0..1ed2ca9e2f36 100644 --- a/substrate/frame/scheduler/src/tests.rs +++ b/substrate/frame/scheduler/src/tests.rs @@ -19,7 +19,8 @@ use super::*; use crate::mock::{ - logger, new_test_ext, root, run_to_block, LoggerCall, RuntimeCall, Scheduler, Test, *, + logger::{self, Threshold}, + new_test_ext, root, run_to_block, LoggerCall, RuntimeCall, Scheduler, Test, *, }; use frame_support::{ assert_err, assert_noop, assert_ok, @@ -179,6 +180,865 @@ fn periodic_scheduling_works() { }); } +#[test] +fn retry_scheduling_works() { + new_test_ext().execute_with(|| { + // task fails until block 8 is reached + Threshold::::put((8, 100)); + // task 42 at #4 + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 42, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + assert!(Agenda::::get(4)[0].is_some()); + // retry 10 times every 3 blocks + assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 3)); + assert_eq!(Retries::::iter().count(), 1); + run_to_block(3); + assert!(logger::log().is_empty()); + assert!(Agenda::::get(4)[0].is_some()); + // task should be retried in block 7 + run_to_block(4); + assert!(Agenda::::get(4).is_empty()); + assert!(Agenda::::get(7)[0].is_some()); + assert!(logger::log().is_empty()); + run_to_block(6); + assert!(Agenda::::get(7)[0].is_some()); + assert!(logger::log().is_empty()); + // task still fails, should be retried in block 10 + run_to_block(7); + assert!(Agenda::::get(7).is_empty()); + assert!(Agenda::::get(10)[0].is_some()); + assert!(logger::log().is_empty()); + run_to_block(8); + assert!(Agenda::::get(10)[0].is_some()); + assert!(logger::log().is_empty()); + run_to_block(9); + assert!(logger::log().is_empty()); + assert_eq!(Retries::::iter().count(), 1); + // finally it should succeed + run_to_block(10); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + assert_eq!(Retries::::iter().count(), 0); + run_to_block(11); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + run_to_block(12); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + run_to_block(100); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + }); +} + +#[test] +fn named_retry_scheduling_works() { + new_test_ext().execute_with(|| { + // task fails 
until block 8 is reached + Threshold::::put((8, 100)); + // task 42 at #4 + let call = RuntimeCall::Logger(logger::Call::timed_log { + i: 42, + weight: Weight::from_parts(10, 0), + }); + assert_eq!( + Scheduler::do_schedule_named( + [1u8; 32], + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call).unwrap(), + ) + .unwrap(), + (4, 0) + ); + assert!(Agenda::::get(4)[0].is_some()); + // retry 10 times every 3 blocks + assert_ok!(Scheduler::set_retry_named(root().into(), [1u8; 32], 10, 3)); + assert_eq!(Retries::::iter().count(), 1); + run_to_block(3); + assert!(logger::log().is_empty()); + assert!(Agenda::::get(4)[0].is_some()); + // task should be retried in block 7 + run_to_block(4); + assert!(Agenda::::get(4).is_empty()); + assert!(Agenda::::get(7)[0].is_some()); + assert!(logger::log().is_empty()); + run_to_block(6); + assert!(Agenda::::get(7)[0].is_some()); + assert!(logger::log().is_empty()); + // task still fails, should be retried in block 10 + run_to_block(7); + assert!(Agenda::::get(7).is_empty()); + assert!(Agenda::::get(10)[0].is_some()); + assert!(logger::log().is_empty()); + run_to_block(8); + assert!(Agenda::::get(10)[0].is_some()); + assert!(logger::log().is_empty()); + run_to_block(9); + assert!(logger::log().is_empty()); + assert_eq!(Retries::::iter().count(), 1); + // finally it should succeed + run_to_block(10); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + assert_eq!(Retries::::iter().count(), 0); + run_to_block(11); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + run_to_block(12); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + run_to_block(100); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + }); +} + +#[test] +fn retry_scheduling_multiple_tasks_works() { + new_test_ext().execute_with(|| { + // task fails until block 8 is reached + Threshold::::put((8, 100)); + // task 20 at #4 + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 20, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + // task 42 at #4 + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 42, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + + assert_eq!(Agenda::::get(4).len(), 2); + // task 20 will be retried 3 times every block + assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 3, 1)); + // task 42 will be retried 10 times every 3 blocks + assert_ok!(Scheduler::set_retry(root().into(), (4, 1), 10, 3)); + assert_eq!(Retries::::iter().count(), 2); + run_to_block(3); + assert!(logger::log().is_empty()); + assert_eq!(Agenda::::get(4).len(), 2); + // both tasks fail + run_to_block(4); + assert!(Agenda::::get(4).is_empty()); + // 20 is rescheduled for next block + assert_eq!(Agenda::::get(5).len(), 1); + // 42 is rescheduled for block 7 + assert_eq!(Agenda::::get(7).len(), 1); + assert!(logger::log().is_empty()); + // 20 still fails + run_to_block(5); + // 20 rescheduled for next block + assert_eq!(Agenda::::get(6).len(), 1); + assert_eq!(Agenda::::get(7).len(), 1); + assert_eq!(Retries::::iter().count(), 2); + assert!(logger::log().is_empty()); + // 20 still fails + run_to_block(6); + // rescheduled for next block together with 42 + assert_eq!(Agenda::::get(7).len(), 2); + assert_eq!(Retries::::iter().count(), 2); + assert!(logger::log().is_empty()); + // both tasks will fail, for 20 it was the last retry so it's dropped + run_to_block(7); 
+ assert!(Agenda::::get(7).is_empty()); + assert!(Agenda::::get(8).is_empty()); + // 42 is rescheduled for block 10 + assert_eq!(Agenda::::get(10).len(), 1); + assert_eq!(Retries::::iter().count(), 1); + assert!(logger::log().is_empty()); + run_to_block(8); + assert_eq!(Agenda::::get(10).len(), 1); + assert!(logger::log().is_empty()); + run_to_block(9); + assert!(logger::log().is_empty()); + assert_eq!(Retries::::iter().count(), 1); + // 42 runs successfully + run_to_block(10); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + assert_eq!(Retries::::iter().count(), 0); + run_to_block(11); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + run_to_block(12); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + run_to_block(100); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + }); +} + +#[test] +fn retry_scheduling_multiple_named_tasks_works() { + new_test_ext().execute_with(|| { + // task fails until we reach block 8 + Threshold::::put((8, 100)); + // task 20 at #4 + assert_ok!(Scheduler::do_schedule_named( + [20u8; 32], + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 20, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + // task 42 at #4 + assert_ok!(Scheduler::do_schedule_named( + [42u8; 32], + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 42, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + + assert_eq!(Agenda::::get(4).len(), 2); + // task 20 will be retried 3 times every block + assert_ok!(Scheduler::set_retry_named(root().into(), [20u8; 32], 3, 1)); + // task 42 will be retried 10 times every 3 block + assert_ok!(Scheduler::set_retry_named(root().into(), [42u8; 32], 10, 3)); + assert_eq!(Retries::::iter().count(), 2); + run_to_block(3); + assert!(logger::log().is_empty()); + assert_eq!(Agenda::::get(4).len(), 2); + // both tasks fail + run_to_block(4); + assert!(Agenda::::get(4).is_empty()); + // 42 is rescheduled for block 7 + assert_eq!(Agenda::::get(7).len(), 1); + // 20 is rescheduled for next block + assert_eq!(Agenda::::get(5).len(), 1); + assert!(logger::log().is_empty()); + // 20 still fails + run_to_block(5); + // 20 rescheduled for next block + assert_eq!(Agenda::::get(6).len(), 1); + assert_eq!(Agenda::::get(7).len(), 1); + assert_eq!(Retries::::iter().count(), 2); + assert!(logger::log().is_empty()); + // 20 still fails + run_to_block(6); + // 20 rescheduled for next block together with 42 + assert_eq!(Agenda::::get(7).len(), 2); + assert_eq!(Retries::::iter().count(), 2); + assert!(logger::log().is_empty()); + // both tasks will fail, for 20 it was the last retry so it's dropped + run_to_block(7); + assert!(Agenda::::get(7).is_empty()); + assert!(Agenda::::get(8).is_empty()); + // 42 is rescheduled for block 10 + assert_eq!(Agenda::::get(10).len(), 1); + assert_eq!(Retries::::iter().count(), 1); + assert!(logger::log().is_empty()); + run_to_block(8); + assert_eq!(Agenda::::get(10).len(), 1); + assert!(logger::log().is_empty()); + run_to_block(9); + assert!(logger::log().is_empty()); + assert_eq!(Retries::::iter().count(), 1); + // 42 runs successfully + run_to_block(10); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + assert_eq!(Retries::::iter().count(), 0); + run_to_block(11); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + run_to_block(12); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + run_to_block(100); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + }); +} + +#[test] +fn 
retry_scheduling_with_period_works() { + new_test_ext().execute_with(|| { + // tasks fail until we reach block 4 and after we're past block 8 + Threshold::::put((4, 8)); + // task 42 at #4, every 3 blocks, 6 times + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(4), + Some((3, 6)), + 127, + root(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 42, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + + assert!(Agenda::::get(4)[0].is_some()); + // 42 will be retried 10 times every 2 blocks + assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 2)); + assert_eq!(Retries::::iter().count(), 1); + run_to_block(3); + assert!(logger::log().is_empty()); + assert!(Agenda::::get(4)[0].is_some()); + // 42 runs successfully once, it will run again at block 7 + run_to_block(4); + assert!(Agenda::::get(4).is_empty()); + assert!(Agenda::::get(7)[0].is_some()); + assert_eq!(Retries::::iter().count(), 1); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + // nothing changed + run_to_block(6); + assert!(Agenda::::get(7)[0].is_some()); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + // 42 runs successfully again, it will run again at block 10 + run_to_block(7); + assert!(Agenda::::get(7).is_empty()); + assert!(Agenda::::get(10)[0].is_some()); + assert_eq!(Retries::::iter().count(), 1); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); + run_to_block(9); + assert!(Agenda::::get(10)[0].is_some()); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); + // 42 has 10 retries left out of a total of 10 + assert_eq!(Retries::::get((10, 0)).unwrap().remaining, 10); + // 42 will fail because we're outside the set threshold (block number in `4..8`), so it + // should be retried in 2 blocks (at block 12) + run_to_block(10); + // should be queued for the normal period of 3 blocks + assert!(Agenda::::get(13)[0].is_some()); + // should also be queued to be retried in 2 blocks + assert!(Agenda::::get(12)[0].is_some()); + // 42 has consumed one retry attempt + assert_eq!(Retries::::get((12, 0)).unwrap().remaining, 9); + assert_eq!(Retries::::get((13, 0)).unwrap().remaining, 10); + assert_eq!(Retries::::iter().count(), 2); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); + // 42 will fail again + run_to_block(12); + // should still be queued for the normal period + assert!(Agenda::::get(13)[0].is_some()); + // should be queued to be retried in 2 blocks + assert!(Agenda::::get(14)[0].is_some()); + // 42 has consumed another retry attempt + assert_eq!(Retries::::get((14, 0)).unwrap().remaining, 8); + assert_eq!(Retries::::get((13, 0)).unwrap().remaining, 10); + assert_eq!(Retries::::iter().count(), 2); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); + // 42 will fail for the regular periodic run + run_to_block(13); + // should still be queued for the normal period + assert!(Agenda::::get(16)[0].is_some()); + // should still be queued to be retried next block + assert!(Agenda::::get(14)[0].is_some()); + // 42 consumed another periodic run, which failed, so another retry is queued for block 15 + assert!(Agenda::::get(16)[0].as_ref().unwrap().maybe_periodic.is_some()); + assert!(Agenda::::get(15)[0].as_ref().unwrap().maybe_periodic.is_none()); + assert!(Agenda::::get(14)[0].as_ref().unwrap().maybe_periodic.is_none()); + assert_eq!(Retries::::iter().count(), 3); + assert!(Retries::::get((14, 0)).unwrap().remaining == 8); + assert!(Retries::::get((15, 0)).unwrap().remaining == 9); + 
assert!(Retries::::get((16, 0)).unwrap().remaining == 10); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); + // change the threshold to allow the task to succeed + Threshold::::put((14, 100)); + // first retry should now succeed + run_to_block(14); + assert!(Agenda::::get(15)[0].as_ref().unwrap().maybe_periodic.is_none()); + assert_eq!(Agenda::::get(16).iter().filter(|entry| entry.is_some()).count(), 1); + assert!(Agenda::::get(16)[0].is_some()); + assert_eq!(Retries::::get((15, 0)).unwrap().remaining, 9); + assert_eq!(Retries::::get((16, 0)).unwrap().remaining, 10); + assert_eq!(Retries::::iter().count(), 2); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]); + // second retry should also succeed + run_to_block(15); + assert_eq!(Agenda::::get(16).iter().filter(|entry| entry.is_some()).count(), 1); + assert!(Agenda::::get(16)[0].is_some()); + assert!(Agenda::::get(17).is_empty()); + assert_eq!(Retries::::get((16, 0)).unwrap().remaining, 10); + assert_eq!(Retries::::iter().count(), 1); + assert_eq!( + logger::log(), + vec![(root(), 42u32), (root(), 42u32), (root(), 42u32), (root(), 42u32)] + ); + // normal periodic run on block 16 will succeed + run_to_block(16); + // next periodic run at block 19 + assert!(Agenda::::get(19)[0].is_some()); + assert!(Agenda::::get(18).is_empty()); + assert!(Agenda::::get(17).is_empty()); + assert_eq!(Retries::::get((19, 0)).unwrap().remaining, 10); + assert_eq!(Retries::::iter().count(), 1); + assert_eq!( + logger::log(), + vec![ + (root(), 42u32), + (root(), 42u32), + (root(), 42u32), + (root(), 42u32), + (root(), 42u32) + ] + ); + // final periodic run on block 19 will succeed + run_to_block(19); + // next periodic run at block 19 + assert_eq!(Agenda::::iter().count(), 0); + assert_eq!(Retries::::iter().count(), 0); + assert_eq!( + logger::log(), + vec![ + (root(), 42u32), + (root(), 42u32), + (root(), 42u32), + (root(), 42u32), + (root(), 42u32), + (root(), 42u32) + ] + ); + }); +} + +#[test] +fn named_retry_scheduling_with_period_works() { + new_test_ext().execute_with(|| { + // tasks fail until we reach block 4 and after we're past block 8 + Threshold::::put((4, 8)); + // task 42 at #4, every 3 blocks, 6 times + assert_ok!(Scheduler::do_schedule_named( + [42u8; 32], + DispatchTime::At(4), + Some((3, 6)), + 127, + root(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 42, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + + assert!(Agenda::::get(4)[0].is_some()); + // 42 will be retried 10 times every 2 blocks + assert_ok!(Scheduler::set_retry_named(root().into(), [42u8; 32], 10, 2)); + assert_eq!(Retries::::iter().count(), 1); + run_to_block(3); + assert!(logger::log().is_empty()); + assert!(Agenda::::get(4)[0].is_some()); + // 42 runs successfully once, it will run again at block 7 + run_to_block(4); + assert!(Agenda::::get(4).is_empty()); + assert!(Agenda::::get(7)[0].is_some()); + assert_eq!(Retries::::iter().count(), 1); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + // nothing changed + run_to_block(6); + assert!(Agenda::::get(7)[0].is_some()); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + // 42 runs successfully again, it will run again at block 10 + run_to_block(7); + assert!(Agenda::::get(7).is_empty()); + assert!(Agenda::::get(10)[0].is_some()); + assert_eq!(Retries::::iter().count(), 1); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); + run_to_block(9); + assert!(Agenda::::get(10)[0].is_some()); + 
assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); + // 42 has 10 retries left out of a total of 10 + assert_eq!(Retries::::get((10, 0)).unwrap().remaining, 10); + // 42 will fail because we're outside the set threshold (block number in `4..8`), so it + // should be retried in 2 blocks (at block 12) + run_to_block(10); + // should be queued for the normal period of 3 blocks + assert!(Agenda::::get(13)[0].is_some()); + // should also be queued to be retried in 2 blocks + assert!(Agenda::::get(12)[0].is_some()); + // 42 has consumed one retry attempt + assert_eq!(Retries::::get((12, 0)).unwrap().remaining, 9); + assert_eq!(Retries::::get((13, 0)).unwrap().remaining, 10); + assert_eq!(Retries::::iter().count(), 2); + assert_eq!(Lookup::::get([42u8; 32]).unwrap(), (13, 0)); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); + // 42 will fail again + run_to_block(12); + // should still be queued for the normal period + assert!(Agenda::::get(13)[0].is_some()); + // should be queued to be retried in 2 blocks + assert!(Agenda::::get(14)[0].is_some()); + // 42 has consumed another retry attempt + assert_eq!(Retries::::get((14, 0)).unwrap().remaining, 8); + assert_eq!(Retries::::get((13, 0)).unwrap().remaining, 10); + assert_eq!(Retries::::iter().count(), 2); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); + // 42 will fail for the regular periodic run + run_to_block(13); + // should still be queued for the normal period + assert!(Agenda::::get(16)[0].is_some()); + // should still be queued to be retried next block + assert!(Agenda::::get(14)[0].is_some()); + // 42 consumed another periodic run, which failed, so another retry is queued for block 15 + assert!(Agenda::::get(16)[0].as_ref().unwrap().maybe_periodic.is_some()); + assert!(Agenda::::get(15)[0].as_ref().unwrap().maybe_periodic.is_none()); + assert!(Agenda::::get(14)[0].as_ref().unwrap().maybe_periodic.is_none()); + assert_eq!(Retries::::iter().count(), 3); + assert!(Retries::::get((14, 0)).unwrap().remaining == 8); + assert!(Retries::::get((15, 0)).unwrap().remaining == 9); + assert!(Retries::::get((16, 0)).unwrap().remaining == 10); + assert_eq!(Lookup::::get([42u8; 32]).unwrap(), (16, 0)); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); + // change the threshold to allow the task to succeed + Threshold::::put((14, 100)); + // first retry should now succeed + run_to_block(14); + assert!(Agenda::::get(15)[0].as_ref().unwrap().maybe_periodic.is_none()); + assert_eq!(Agenda::::get(16).iter().filter(|entry| entry.is_some()).count(), 1); + assert!(Agenda::::get(16)[0].is_some()); + assert_eq!(Retries::::get((15, 0)).unwrap().remaining, 9); + assert_eq!(Retries::::get((16, 0)).unwrap().remaining, 10); + assert_eq!(Retries::::iter().count(), 2); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]); + // second retry should also succeed + run_to_block(15); + assert_eq!(Agenda::::get(16).iter().filter(|entry| entry.is_some()).count(), 1); + assert!(Agenda::::get(16)[0].is_some()); + assert!(Agenda::::get(17).is_empty()); + assert_eq!(Retries::::get((16, 0)).unwrap().remaining, 10); + assert_eq!(Retries::::iter().count(), 1); + assert_eq!(Lookup::::get([42u8; 32]).unwrap(), (16, 0)); + assert_eq!( + logger::log(), + vec![(root(), 42u32), (root(), 42u32), (root(), 42u32), (root(), 42u32)] + ); + // normal periodic run on block 16 will succeed + run_to_block(16); + // next periodic run at block 19 + assert!(Agenda::::get(19)[0].is_some()); + 
assert!(Agenda::::get(18).is_empty()); + assert!(Agenda::::get(17).is_empty()); + assert_eq!(Retries::::get((19, 0)).unwrap().remaining, 10); + assert_eq!(Retries::::iter().count(), 1); + assert_eq!(Lookup::::get([42u8; 32]).unwrap(), (19, 0)); + assert_eq!( + logger::log(), + vec![ + (root(), 42u32), + (root(), 42u32), + (root(), 42u32), + (root(), 42u32), + (root(), 42u32) + ] + ); + // final periodic run on block 19 will succeed + run_to_block(19); + // next periodic run at block 19 + assert_eq!(Agenda::::iter().count(), 0); + assert_eq!(Retries::::iter().count(), 0); + assert_eq!(Lookup::::iter().count(), 0); + assert_eq!( + logger::log(), + vec![ + (root(), 42u32), + (root(), 42u32), + (root(), 42u32), + (root(), 42u32), + (root(), 42u32), + (root(), 42u32) + ] + ); + }); +} + +#[test] +fn retry_scheduling_expires() { + new_test_ext().execute_with(|| { + // task will fail if we're past block 3 + Threshold::::put((1, 3)); + // task 42 at #4 + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 42, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + assert!(Agenda::::get(4)[0].is_some()); + // task 42 will be retried 3 times every block + assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 3, 1)); + assert_eq!(Retries::::iter().count(), 1); + run_to_block(3); + assert!(logger::log().is_empty()); + // task 42 is scheduled for next block + assert!(Agenda::::get(4)[0].is_some()); + // task fails because we're past block 3 + run_to_block(4); + // task is scheduled for next block + assert!(Agenda::::get(4).is_empty()); + assert!(Agenda::::get(5)[0].is_some()); + // one retry attempt is consumed + assert_eq!(Retries::::get((5, 0)).unwrap().remaining, 2); + assert!(logger::log().is_empty()); + // task fails again + run_to_block(5); + // task is scheduled for next block + assert!(Agenda::::get(5).is_empty()); + assert!(Agenda::::get(6)[0].is_some()); + // another retry attempt is consumed + assert_eq!(Retries::::get((6, 0)).unwrap().remaining, 1); + assert!(logger::log().is_empty()); + // task fails again + run_to_block(6); + // task is scheduled for next block + assert!(Agenda::::get(6).is_empty()); + assert!(Agenda::::get(7)[0].is_some()); + // another retry attempt is consumed + assert_eq!(Retries::::get((7, 0)).unwrap().remaining, 0); + assert!(logger::log().is_empty()); + // task fails again + run_to_block(7); + // task ran out of retries so it gets dropped + assert_eq!(Agenda::::iter().count(), 0); + assert_eq!(Retries::::iter().count(), 0); + assert!(logger::log().is_empty()); + }); +} + +#[test] +fn set_retry_bad_origin() { + new_test_ext().execute_with(|| { + // task 42 at #4 with account 101 as origin + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(4), + None, + 127, + 101.into(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 42, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + + assert!(Agenda::::get(4)[0].is_some()); + // try to change the retry config with a different (non-root) account + let res: Result<(), DispatchError> = + Scheduler::set_retry(RuntimeOrigin::signed(102), (4, 0), 10, 2); + assert_eq!(res, Err(BadOrigin.into())); + }); +} + +#[test] +fn set_named_retry_bad_origin() { + new_test_ext().execute_with(|| { + // task 42 at #4 with account 101 as origin + assert_ok!(Scheduler::do_schedule_named( + [42u8; 32], + DispatchTime::At(4), + None, + 127, + 101.into(), + 
Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 42, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + + assert!(Agenda::::get(4)[0].is_some()); + // try to change the retry config with a different (non-root) account + let res: Result<(), DispatchError> = + Scheduler::set_retry_named(RuntimeOrigin::signed(102), [42u8; 32], 10, 2); + assert_eq!(res, Err(BadOrigin.into())); + }); +} + +#[test] +fn set_retry_works() { + new_test_ext().execute_with(|| { + // task 42 at #4 + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 42, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + + assert!(Agenda::::get(4)[0].is_some()); + // make sure the retry configuration was stored + assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 2)); + assert_eq!( + Retries::::get((4, 0)), + Some(RetryConfig { total_retries: 10, remaining: 10, period: 2 }) + ); + }); +} + +#[test] +fn set_named_retry_works() { + new_test_ext().execute_with(|| { + // task 42 at #4 with account 101 as origin + assert_ok!(Scheduler::do_schedule_named( + [42u8; 32], + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 42, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + + assert!(Agenda::::get(4)[0].is_some()); + // make sure the retry configuration was stored + assert_ok!(Scheduler::set_retry_named(root().into(), [42u8; 32], 10, 2)); + let address = Lookup::::get([42u8; 32]).unwrap(); + assert_eq!( + Retries::::get(address), + Some(RetryConfig { total_retries: 10, remaining: 10, period: 2 }) + ); + }); +} + +#[test] +fn retry_periodic_full_cycle() { + new_test_ext().execute_with(|| { + // tasks fail after we pass block 1000 + Threshold::::put((1, 1000)); + // task 42 at #4, every 100 blocks, 4 times + assert_ok!(Scheduler::do_schedule_named( + [42u8; 32], + DispatchTime::At(10), + Some((100, 4)), + 127, + root(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 42, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + + assert!(Agenda::::get(10)[0].is_some()); + // 42 will be retried 2 times every block + assert_ok!(Scheduler::set_retry_named(root().into(), [42u8; 32], 2, 1)); + assert_eq!(Retries::::iter().count(), 1); + run_to_block(9); + assert!(logger::log().is_empty()); + assert!(Agenda::::get(10)[0].is_some()); + // 42 runs successfully once, it will run again at block 110 + run_to_block(10); + assert!(Agenda::::get(10).is_empty()); + assert!(Agenda::::get(110)[0].is_some()); + assert_eq!(Retries::::iter().count(), 1); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + // nothing changed + run_to_block(109); + assert!(Agenda::::get(110)[0].is_some()); + // original task still has 2 remaining retries + assert_eq!(Retries::::get((110, 0)).unwrap().remaining, 2); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + // make 42 fail next block + Threshold::::put((1, 2)); + // 42 will fail because we're outside the set threshold (block number in `1..2`), so it + // should be retried next block (at block 111) + run_to_block(110); + // should be queued for the normal period of 100 blocks + assert!(Agenda::::get(210)[0].is_some()); + // should also be queued to be retried next block + assert!(Agenda::::get(111)[0].is_some()); + // 42 retry clone has consumed one retry attempt + assert_eq!(Retries::::get((111, 0)).unwrap().remaining, 1); + // 42 original task still has the original 
remaining attempts + assert_eq!(Retries::::get((210, 0)).unwrap().remaining, 2); + assert_eq!(Retries::::iter().count(), 2); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + // 42 retry will fail again + run_to_block(111); + // should still be queued for the normal period + assert!(Agenda::::get(210)[0].is_some()); + // should be queued to be retried next block + assert!(Agenda::::get(112)[0].is_some()); + // 42 has consumed another retry attempt + assert_eq!(Retries::::get((210, 0)).unwrap().remaining, 2); + assert_eq!(Retries::::get((112, 0)).unwrap().remaining, 0); + assert_eq!(Retries::::iter().count(), 2); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + // 42 retry will fail again + run_to_block(112); + // should still be queued for the normal period + assert!(Agenda::::get(210)[0].is_some()); + // 42 retry clone ran out of retries, must have been evicted + assert_eq!(Agenda::::iter().count(), 1); + + // advance + run_to_block(209); + // should still be queued for the normal period + assert!(Agenda::::get(210)[0].is_some()); + // 42 retry clone ran out of retries, must have been evicted + assert_eq!(Agenda::::iter().count(), 1); + // 42 should fail again and should spawn another retry clone + run_to_block(210); + // should be queued for the normal period of 100 blocks + assert!(Agenda::::get(310)[0].is_some()); + // should also be queued to be retried next block + assert!(Agenda::::get(211)[0].is_some()); + // 42 retry clone has consumed one retry attempt + assert_eq!(Retries::::get((211, 0)).unwrap().remaining, 1); + // 42 original task still has the original remaining attempts + assert_eq!(Retries::::get((310, 0)).unwrap().remaining, 2); + assert_eq!(Retries::::iter().count(), 2); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + // make 42 run successfully again + Threshold::::put((1, 1000)); + // 42 retry clone should now succeed + run_to_block(211); + // should be queued for the normal period of 100 blocks + assert!(Agenda::::get(310)[0].is_some()); + // retry was successful, retry task should have been discarded + assert_eq!(Agenda::::iter().count(), 1); + // 42 original task still has the original remaining attempts + assert_eq!(Retries::::get((310, 0)).unwrap().remaining, 2); + assert_eq!(Retries::::iter().count(), 1); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); + + // fast forward to the last periodic run of 42 + run_to_block(310); + // 42 was successful, the period ended as this was the 4th scheduled periodic run so 42 must + // have been discarded + assert_eq!(Agenda::::iter().count(), 0); + // agenda is empty so no retries should exist + assert_eq!(Retries::::iter().count(), 0); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]); + }); +} + #[test] fn reschedule_works() { new_test_ext().execute_with(|| { @@ -430,6 +1290,117 @@ fn scheduler_respects_weight_limits() { }); } +#[test] +fn retry_respects_weight_limits() { + let max_weight: Weight = ::MaximumWeight::get(); + new_test_ext().execute_with(|| { + // schedule 42 + let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 3 * 2 }); + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(8), + None, + 127, + root(), + Preimage::bound(call).unwrap(), + )); + // schedule 20 with a call that will fail until we reach block 8 + Threshold::::put((8, 100)); + let call = RuntimeCall::Logger(LoggerCall::timed_log { i: 20, weight: max_weight / 3 * 2 }); + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(4), + None, + 127, + root(), 
+ Preimage::bound(call).unwrap(), + )); + // set a retry config for 20 for 10 retries every block + assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 1)); + // 20 should fail and be retried later + run_to_block(4); + assert!(Agenda::::get(5)[0].is_some()); + assert!(Agenda::::get(8)[0].is_some()); + assert_eq!(Retries::::iter().count(), 1); + assert!(logger::log().is_empty()); + // 20 still fails but is scheduled next block together with 42 + run_to_block(7); + assert_eq!(Agenda::::get(8).len(), 2); + assert_eq!(Retries::::iter().count(), 1); + assert!(logger::log().is_empty()); + // 20 and 42 do not fit together + // 42 is executed as it was first in the queue + // 20 is still on the 8th block's agenda + run_to_block(8); + assert!(Agenda::::get(8)[0].is_none()); + assert!(Agenda::::get(8)[1].is_some()); + assert_eq!(Retries::::iter().count(), 1); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + // 20 is executed and the schedule is cleared + run_to_block(9); + assert_eq!(Agenda::::iter().count(), 0); + assert_eq!(Retries::::iter().count(), 0); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 20u32)]); + }); +} + +#[test] +fn try_schedule_retry_respects_weight_limits() { + let max_weight: Weight = ::MaximumWeight::get(); + new_test_ext().execute_with(|| { + let service_agendas_weight = ::WeightInfo::service_agendas_base(); + let service_agenda_weight = ::WeightInfo::service_agenda_base( + ::MaxScheduledPerBlock::get(), + ); + let actual_service_agenda_weight = ::WeightInfo::service_agenda_base(1); + // Some weight for `service_agenda` will be refunded, so we need to make sure the weight + // `try_schedule_retry` is going to ask for is greater than this difference, and we take a + // safety factor of 10 to make sure we're over that limit. + let meter = WeightMeter::with_limit( + ::WeightInfo::schedule_retry( + ::MaxScheduledPerBlock::get(), + ) / 10, + ); + assert!(meter.can_consume(service_agenda_weight - actual_service_agenda_weight)); + + let reference_call = + RuntimeCall::Logger(LoggerCall::timed_log { i: 20, weight: max_weight / 3 * 2 }); + let bounded = ::Preimages::bound(reference_call).unwrap(); + let base_weight = ::WeightInfo::service_task( + bounded.lookup_len().map(|x| x as usize), + false, + false, + ); + // we make the call cost enough so that all checks have enough weight to run aside from + // `try_schedule_retry` + let call_weight = max_weight - service_agendas_weight - service_agenda_weight - base_weight; + let call = RuntimeCall::Logger(LoggerCall::timed_log { i: 20, weight: call_weight }); + // schedule 20 with a call that will fail until we reach block 8 + Threshold::::put((8, 100)); + + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call).unwrap(), + )); + // set a retry config for 20 for 10 retries every block + assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 1)); + // 20 should fail and, because of insufficient weight, it should not be scheduled again + run_to_block(4); + // nothing else should be scheduled + assert_eq!(Agenda::::iter().count(), 0); + assert_eq!(Retries::::iter().count(), 0); + assert_eq!(logger::log(), vec![]); + // check the `RetryFailed` event happened + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = + Event::RetryFailed { task: (4, 0), id: None }.into(); + // compare to the last event record + let frame_system::EventRecord { event, .. 
} = &events[events.len() - 1]; + assert_eq!(event, &system_event); + }); +} + /// Permanently overweight calls are not deleted but also not executed. #[test] fn scheduler_does_not_delete_permanently_overweight_call() { @@ -877,6 +1848,134 @@ fn should_check_origin_for_cancel() { }); } +#[test] +fn cancel_removes_retry_entry() { + new_test_ext().execute_with(|| { + // task fails until block 99 is reached + Threshold::::put((99, 100)); + // task 20 at #4 + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 20, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + // named task 42 at #4 + assert_ok!(Scheduler::do_schedule_named( + [1u8; 32], + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 42, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + + assert_eq!(Agenda::::get(4).len(), 2); + // task 20 will be retried 3 times every block + assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 1)); + // task 42 will be retried 10 times every 3 blocks + assert_ok!(Scheduler::set_retry_named(root().into(), [1u8; 32], 10, 1)); + assert_eq!(Retries::::iter().count(), 2); + run_to_block(3); + assert!(logger::log().is_empty()); + assert_eq!(Agenda::::get(4).len(), 2); + // both tasks fail + run_to_block(4); + assert!(Agenda::::get(4).is_empty()); + // 42 and 20 are rescheduled for next block + assert_eq!(Agenda::::get(5).len(), 2); + assert!(logger::log().is_empty()); + // 42 and 20 still fail + run_to_block(5); + // 42 and 20 rescheduled for next block + assert_eq!(Agenda::::get(6).len(), 2); + assert_eq!(Retries::::iter().count(), 2); + assert!(logger::log().is_empty()); + + // even though 42 is being retried, the tasks scheduled for retries are not named + assert_eq!(Lookup::::iter().count(), 0); + assert!(Scheduler::cancel(root().into(), 6, 0).is_ok()); + + // 20 is removed, 42 still fails + run_to_block(6); + // 42 rescheduled for next block + assert_eq!(Agenda::::get(7).len(), 1); + // 20's retry entry is removed + assert!(!Retries::::contains_key((4, 0))); + assert_eq!(Retries::::iter().count(), 1); + assert!(logger::log().is_empty()); + + assert!(Scheduler::cancel(root().into(), 7, 0).is_ok()); + + // both tasks are canceled, everything is removed now + run_to_block(7); + assert!(Agenda::::get(8).is_empty()); + assert_eq!(Retries::::iter().count(), 0); + }); +} + +#[test] +fn cancel_retries_works() { + new_test_ext().execute_with(|| { + // task fails until block 99 is reached + Threshold::::put((99, 100)); + // task 20 at #4 + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 20, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + // named task 42 at #4 + assert_ok!(Scheduler::do_schedule_named( + [1u8; 32], + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(RuntimeCall::Logger(logger::Call::timed_log { + i: 42, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + )); + + assert_eq!(Agenda::::get(4).len(), 2); + // task 20 will be retried 3 times every block + assert_ok!(Scheduler::set_retry(root().into(), (4, 0), 10, 1)); + // task 42 will be retried 10 times every 3 blocks + assert_ok!(Scheduler::set_retry_named(root().into(), [1u8; 32], 10, 1)); + assert_eq!(Retries::::iter().count(), 2); + run_to_block(3); + assert!(logger::log().is_empty()); + 
assert_eq!(Agenda::::get(4).len(), 2); + // cancel the retry config for 20 + assert_ok!(Scheduler::cancel_retry(root().into(), (4, 0))); + assert_eq!(Retries::::iter().count(), 1); + // cancel the retry config for 42 + assert_ok!(Scheduler::cancel_retry_named(root().into(), [1u8; 32])); + assert_eq!(Retries::::iter().count(), 0); + run_to_block(4); + // both tasks failed and there are no more retries, so they are evicted + assert_eq!(Agenda::::get(4).len(), 0); + assert_eq!(Retries::::iter().count(), 0); + }); +} + #[test] fn migration_to_v4_works() { new_test_ext().execute_with(|| { @@ -1054,6 +2153,8 @@ fn test_migrate_origin() { match self { 3u32 => system::RawOrigin::Root.into(), 2u32 => system::RawOrigin::None.into(), + 101u32 => system::RawOrigin::Signed(101).into(), + 102u32 => system::RawOrigin::Signed(102).into(), _ => unreachable!("test make no use of it"), } } diff --git a/substrate/frame/scheduler/src/weights.rs b/substrate/frame/scheduler/src/weights.rs index 58d711862591..9b7e5405a1b5 100644 --- a/substrate/frame/scheduler/src/weights.rs +++ b/substrate/frame/scheduler/src/weights.rs @@ -15,32 +15,29 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_scheduler +//! Autogenerated weights for `pallet_scheduler` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_scheduler -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/scheduler/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_scheduler +// --chain=dev +// --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/scheduler/src/weights.rs +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,7 +47,7 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_scheduler. +/// Weight functions needed for `pallet_scheduler`. pub trait WeightInfo { fn service_agendas_base() -> Weight; fn service_agenda_base(s: u32, ) -> Weight; @@ -64,33 +61,38 @@ pub trait WeightInfo { fn cancel(s: u32, ) -> Weight; fn schedule_named(s: u32, ) -> Weight; fn cancel_named(s: u32, ) -> Weight; + fn schedule_retry(s: u32, ) -> Weight; + fn set_retry() -> Weight; + fn set_retry_named() -> Weight; + fn cancel_retry() -> Weight; + fn cancel_retry_named() -> Weight; } -/// Weights for pallet_scheduler using the Substrate node and recommended hardware. 
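// A small sketch, assuming the `Test` mock runtime: these are the trait methods that the
// `#[pallet::weight(...)]` annotations on the new retry extrinsics resolve to, plus the
// worst-case amount the scheduler pre-charges before attempting to queue a retry.
let _set_retry_cost: Weight = <Test as Config>::WeightInfo::set_retry();
let _retry_queue_cost: Weight =
	<Test as Config>::WeightInfo::schedule_retry(<Test as Config>::MaxScheduledPerBlock::get());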
+/// Weights for `pallet_scheduler` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Scheduler IncompleteSince (r:1 w:1) - /// Proof: Scheduler IncompleteSince (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `Scheduler::IncompleteSince` (r:1 w:1) + /// Proof: `Scheduler::IncompleteSince` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn service_agendas_base() -> Weight { // Proof Size summary in bytes: // Measured: `31` // Estimated: `1489` - // Minimum execution time: 3_991_000 picoseconds. - Weight::from_parts(4_174_000, 1489) + // Minimum execution time: 3_040_000 picoseconds. + Weight::from_parts(3_202_000, 1489) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 512]`. fn service_agenda_base(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 3_581_000 picoseconds. - Weight::from_parts(7_413_174, 110487) - // Standard Error: 971 - .saturating_add(Weight::from_parts(348_077, 0).saturating_mul(s.into())) + // Minimum execution time: 3_462_000 picoseconds. + Weight::from_parts(6_262_125, 110487) + // Standard Error: 536 + .saturating_add(Weight::from_parts(332_570, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -98,145 +100,226 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_250_000 picoseconds. - Weight::from_parts(5_549_000, 0) + // Minimum execution time: 3_425_000 picoseconds. + Weight::from_parts(3_680_000, 0) } - /// Storage: Preimage PreimageFor (r:1 w:1) - /// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured) - /// Storage: Preimage StatusFor (r:1 w:1) - /// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen) + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::RequestStatusFor` (r:1 w:1) + /// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) /// The range of component `s` is `[128, 4194304]`. fn service_task_fetched(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `179 + s * (1 ±0)` - // Estimated: `3644 + s * (1 ±0)` - // Minimum execution time: 20_089_000 picoseconds. - Weight::from_parts(20_376_000, 3644) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_170, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2_u64)) + // Measured: `246 + s * (1 ±0)` + // Estimated: `3711 + s * (1 ±0)` + // Minimum execution time: 17_564_000 picoseconds. 
+ Weight::from_parts(17_887_000, 3711) + // Standard Error: 1 + .saturating_add(Weight::from_parts(1_253, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) } - /// Storage: Scheduler Lookup (r:0 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) fn service_task_named() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_998_000 picoseconds. - Weight::from_parts(7_303_000, 0) + // Minimum execution time: 4_934_000 picoseconds. + Weight::from_parts(5_275_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } fn service_task_periodic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_078_000 picoseconds. - Weight::from_parts(5_315_000, 0) + // Minimum execution time: 3_348_000 picoseconds. + Weight::from_parts(3_561_000, 0) } + /// Storage: `SafeMode::EnteredUntil` (r:1 w:0) + /// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `TxPause::PausedCalls` (r:1 w:0) + /// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`) fn execute_dispatch_signed() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_228_000 picoseconds. - Weight::from_parts(2_352_000, 0) + // Measured: `145` + // Estimated: `3997` + // Minimum execution time: 6_395_000 picoseconds. + Weight::from_parts(6_642_000, 3997) + .saturating_add(T::DbWeight::get().reads(2_u64)) } fn execute_dispatch_unsigned() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_226_000 picoseconds. - Weight::from_parts(2_371_000, 0) + // Minimum execution time: 2_167_000 picoseconds. + Weight::from_parts(2_266_000, 0) } - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 511]`. fn schedule(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 12_683_000 picoseconds. - Weight::from_parts(16_951_846, 110487) - // Standard Error: 1_046 - .saturating_add(Weight::from_parts(380_842, 0).saturating_mul(s.into())) + // Minimum execution time: 10_009_000 picoseconds. 
+ Weight::from_parts(13_565_985, 110487) + // Standard Error: 575 + .saturating_add(Weight::from_parts(354_760, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) - /// Storage: Scheduler Lookup (r:0 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 512]`. fn cancel(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `81 + s * (177 ±0)` // Estimated: `110487` - // Minimum execution time: 16_201_000 picoseconds. - Weight::from_parts(18_259_422, 110487) - // Standard Error: 1_344 - .saturating_add(Weight::from_parts(545_863, 0).saturating_mul(s.into())) + // Minimum execution time: 14_048_000 picoseconds. + Weight::from_parts(15_141_696, 110487) + // Standard Error: 1_082 + .saturating_add(Weight::from_parts(533_390, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Scheduler Lookup (r:1 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 511]`. fn schedule_named(s: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `596 + s * (178 ±0)` // Estimated: `110487` - // Minimum execution time: 16_180_000 picoseconds. - Weight::from_parts(25_128_925, 110487) - // Standard Error: 1_118 - .saturating_add(Weight::from_parts(375_631, 0).saturating_mul(s.into())) + // Minimum execution time: 12_902_000 picoseconds. + Weight::from_parts(18_957_156, 110487) + // Standard Error: 792 + .saturating_add(Weight::from_parts(361_909, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: Scheduler Lookup (r:1 w:1) - /// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen) - /// Storage: Scheduler Agenda (r:1 w:1) - /// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`) /// The range of component `s` is `[1, 512]`. 
 	fn cancel_named(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `709 + s * (177 ±0)`
 		// Estimated: `110487`
-		// Minimum execution time: 18_244_000 picoseconds.
-		Weight::from_parts(21_439_366, 110487)
-			// Standard Error: 1_084
-			.saturating_add(Weight::from_parts(557_691, 0).saturating_mul(s.into()))
+		// Minimum execution time: 15_933_000 picoseconds.
+		Weight::from_parts(18_091_415, 110487)
+			// Standard Error: 779
+			.saturating_add(Weight::from_parts(534_402, 0).saturating_mul(s.into()))
 			.saturating_add(T::DbWeight::get().reads(2_u64))
 			.saturating_add(T::DbWeight::get().writes(2_u64))
 	}
+	/// Storage: `Scheduler::Retries` (r:1 w:2)
+	/// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Agenda` (r:1 w:1)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Lookup` (r:0 w:1)
+	/// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+	/// The range of component `s` is `[1, 512]`.
+	fn schedule_retry(s: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `159`
+		// Estimated: `110487`
+		// Minimum execution time: 14_155_000 picoseconds.
+		Weight::from_parts(16_447_031, 110487)
+			// Standard Error: 233
+			.saturating_add(Weight::from_parts(8_424, 0).saturating_mul(s.into()))
+			.saturating_add(T::DbWeight::get().reads(2_u64))
+			.saturating_add(T::DbWeight::get().writes(4_u64))
+	}
+	/// Storage: `Scheduler::Agenda` (r:1 w:0)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Retries` (r:0 w:1)
+	/// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+	fn set_retry() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `81 + s * (177 ±0)`
+		// Estimated: `110487`
+		// Minimum execution time: 8_130_000 picoseconds.
+		Weight::from_parts(9_047_554, 110487)
+			.saturating_add(T::DbWeight::get().reads(1_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+	/// Storage: `Scheduler::Lookup` (r:1 w:0)
+	/// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Agenda` (r:1 w:0)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Retries` (r:0 w:1)
+	/// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+	fn set_retry_named() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `647 + s * (178 ±0)`
+		// Estimated: `110487`
+		// Minimum execution time: 10_838_000 picoseconds.
+		Weight::from_parts(12_804_076, 110487)
+			.saturating_add(T::DbWeight::get().reads(2_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+	/// Storage: `Scheduler::Agenda` (r:1 w:0)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Retries` (r:0 w:1)
+	/// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+	fn cancel_retry() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `81 + s * (177 ±0)`
+		// Estimated: `110487`
+		// Minimum execution time: 8_130_000 picoseconds.
+		Weight::from_parts(9_047_554, 110487)
+			.saturating_add(T::DbWeight::get().reads(1_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
+	/// Storage: `Scheduler::Lookup` (r:1 w:0)
+	/// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Agenda` (r:1 w:0)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Retries` (r:0 w:1)
+	/// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+	fn cancel_retry_named() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `647 + s * (178 ±0)`
+		// Estimated: `110487`
+		// Minimum execution time: 10_838_000 picoseconds.
+		Weight::from_parts(12_804_076, 110487)
+			.saturating_add(T::DbWeight::get().reads(2_u64))
+			.saturating_add(T::DbWeight::get().writes(1_u64))
+	}
 }
-// For backwards compatibility and tests
+// For backwards compatibility and tests.
 impl WeightInfo for () {
-	/// Storage: Scheduler IncompleteSince (r:1 w:1)
-	/// Proof: Scheduler IncompleteSince (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
+	/// Storage: `Scheduler::IncompleteSince` (r:1 w:1)
+	/// Proof: `Scheduler::IncompleteSince` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	fn service_agendas_base() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `31`
 		// Estimated: `1489`
-		// Minimum execution time: 3_991_000 picoseconds.
-		Weight::from_parts(4_174_000, 1489)
+		// Minimum execution time: 3_040_000 picoseconds.
+		Weight::from_parts(3_202_000, 1489)
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
-	/// Storage: Scheduler Agenda (r:1 w:1)
-	/// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen)
+	/// Storage: `Scheduler::Agenda` (r:1 w:1)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[0, 512]`.
 	fn service_agenda_base(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `81 + s * (177 ±0)`
 		// Estimated: `110487`
-		// Minimum execution time: 3_581_000 picoseconds.
-		Weight::from_parts(7_413_174, 110487)
-			// Standard Error: 971
-			.saturating_add(Weight::from_parts(348_077, 0).saturating_mul(s.into()))
+		// Minimum execution time: 3_462_000 picoseconds.
+		Weight::from_parts(6_262_125, 110487)
+			// Standard Error: 536
+			.saturating_add(Weight::from_parts(332_570, 0).saturating_mul(s.into()))
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
@@ -244,117 +327,198 @@ impl WeightInfo for () {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 5_250_000 picoseconds.
-		Weight::from_parts(5_549_000, 0)
+		// Minimum execution time: 3_425_000 picoseconds.
+		Weight::from_parts(3_680_000, 0)
 	}
-	/// Storage: Preimage PreimageFor (r:1 w:1)
-	/// Proof: Preimage PreimageFor (max_values: None, max_size: Some(4194344), added: 4196819, mode: Measured)
-	/// Storage: Preimage StatusFor (r:1 w:1)
-	/// Proof: Preimage StatusFor (max_values: None, max_size: Some(91), added: 2566, mode: MaxEncodedLen)
+	/// Storage: `Preimage::PreimageFor` (r:1 w:1)
+	/// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`)
+	/// Storage: `Preimage::StatusFor` (r:1 w:0)
+	/// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`)
+	/// Storage: `Preimage::RequestStatusFor` (r:1 w:1)
+	/// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[128, 4194304]`.
 	fn service_task_fetched(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `179 + s * (1 ±0)`
-		// Estimated: `3644 + s * (1 ±0)`
-		// Minimum execution time: 20_089_000 picoseconds.
-		Weight::from_parts(20_376_000, 3644)
-			// Standard Error: 3
-			.saturating_add(Weight::from_parts(1_170, 0).saturating_mul(s.into()))
-			.saturating_add(RocksDbWeight::get().reads(2_u64))
+		// Measured: `246 + s * (1 ±0)`
+		// Estimated: `3711 + s * (1 ±0)`
+		// Minimum execution time: 17_564_000 picoseconds.
+		Weight::from_parts(17_887_000, 3711)
+			// Standard Error: 1
+			.saturating_add(Weight::from_parts(1_253, 0).saturating_mul(s.into()))
+			.saturating_add(RocksDbWeight::get().reads(3_u64))
 			.saturating_add(RocksDbWeight::get().writes(2_u64))
 			.saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into()))
 	}
-	/// Storage: Scheduler Lookup (r:0 w:1)
-	/// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen)
+	/// Storage: `Scheduler::Lookup` (r:0 w:1)
+	/// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
 	fn service_task_named() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 6_998_000 picoseconds.
-		Weight::from_parts(7_303_000, 0)
+		// Minimum execution time: 4_934_000 picoseconds.
+		Weight::from_parts(5_275_000, 0)
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
 	fn service_task_periodic() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 5_078_000 picoseconds.
-		Weight::from_parts(5_315_000, 0)
+		// Minimum execution time: 3_348_000 picoseconds.
+		Weight::from_parts(3_561_000, 0)
 	}
+	/// Storage: `SafeMode::EnteredUntil` (r:1 w:0)
+	/// Proof: `SafeMode::EnteredUntil` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	/// Storage: `TxPause::PausedCalls` (r:1 w:0)
+	/// Proof: `TxPause::PausedCalls` (`max_values`: None, `max_size`: Some(532), added: 3007, mode: `MaxEncodedLen`)
 	fn execute_dispatch_signed() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `0`
-		// Estimated: `0`
-		// Minimum execution time: 2_228_000 picoseconds.
-		Weight::from_parts(2_352_000, 0)
+		// Measured: `145`
+		// Estimated: `3997`
+		// Minimum execution time: 6_395_000 picoseconds.
+		Weight::from_parts(6_642_000, 3997)
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
 	}
 	fn execute_dispatch_unsigned() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_226_000 picoseconds.
-		Weight::from_parts(2_371_000, 0)
+		// Minimum execution time: 2_167_000 picoseconds.
+		Weight::from_parts(2_266_000, 0)
 	}
-	/// Storage: Scheduler Agenda (r:1 w:1)
-	/// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen)
+	/// Storage: `Scheduler::Agenda` (r:1 w:1)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[0, 511]`.
 	fn schedule(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `81 + s * (177 ±0)`
 		// Estimated: `110487`
-		// Minimum execution time: 12_683_000 picoseconds.
-		Weight::from_parts(16_951_846, 110487)
-			// Standard Error: 1_046
-			.saturating_add(Weight::from_parts(380_842, 0).saturating_mul(s.into()))
+		// Minimum execution time: 10_009_000 picoseconds.
+		Weight::from_parts(13_565_985, 110487)
+			// Standard Error: 575
+			.saturating_add(Weight::from_parts(354_760, 0).saturating_mul(s.into()))
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 			.saturating_add(RocksDbWeight::get().writes(1_u64))
 	}
-	/// Storage: Scheduler Agenda (r:1 w:1)
-	/// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen)
-	/// Storage: Scheduler Lookup (r:0 w:1)
-	/// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen)
+	/// Storage: `Scheduler::Agenda` (r:1 w:1)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Lookup` (r:0 w:1)
+	/// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[1, 512]`.
 	fn cancel(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `81 + s * (177 ±0)`
 		// Estimated: `110487`
-		// Minimum execution time: 16_201_000 picoseconds.
-		Weight::from_parts(18_259_422, 110487)
-			// Standard Error: 1_344
-			.saturating_add(Weight::from_parts(545_863, 0).saturating_mul(s.into()))
+		// Minimum execution time: 14_048_000 picoseconds.
+		Weight::from_parts(15_141_696, 110487)
+			// Standard Error: 1_082
+			.saturating_add(Weight::from_parts(533_390, 0).saturating_mul(s.into()))
 			.saturating_add(RocksDbWeight::get().reads(1_u64))
 			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
-	/// Storage: Scheduler Lookup (r:1 w:1)
-	/// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen)
-	/// Storage: Scheduler Agenda (r:1 w:1)
-	/// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen)
+	/// Storage: `Scheduler::Lookup` (r:1 w:1)
+	/// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Agenda` (r:1 w:1)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[0, 511]`.
 	fn schedule_named(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `596 + s * (178 ±0)`
 		// Estimated: `110487`
-		// Minimum execution time: 16_180_000 picoseconds.
-		Weight::from_parts(25_128_925, 110487)
-			// Standard Error: 1_118
-			.saturating_add(Weight::from_parts(375_631, 0).saturating_mul(s.into()))
+		// Minimum execution time: 12_902_000 picoseconds.
+		Weight::from_parts(18_957_156, 110487)
+			// Standard Error: 792
+			.saturating_add(Weight::from_parts(361_909, 0).saturating_mul(s.into()))
 			.saturating_add(RocksDbWeight::get().reads(2_u64))
 			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
-	/// Storage: Scheduler Lookup (r:1 w:1)
-	/// Proof: Scheduler Lookup (max_values: None, max_size: Some(48), added: 2523, mode: MaxEncodedLen)
-	/// Storage: Scheduler Agenda (r:1 w:1)
-	/// Proof: Scheduler Agenda (max_values: None, max_size: Some(107022), added: 109497, mode: MaxEncodedLen)
+	/// Storage: `Scheduler::Lookup` (r:1 w:1)
+	/// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Agenda` (r:1 w:1)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
 	/// The range of component `s` is `[1, 512]`.
 	fn cancel_named(s: u32, ) -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `709 + s * (177 ±0)`
 		// Estimated: `110487`
-		// Minimum execution time: 18_244_000 picoseconds.
-		Weight::from_parts(21_439_366, 110487)
-			// Standard Error: 1_084
-			.saturating_add(Weight::from_parts(557_691, 0).saturating_mul(s.into()))
+		// Minimum execution time: 15_933_000 picoseconds.
+		Weight::from_parts(18_091_415, 110487)
+			// Standard Error: 779
+			.saturating_add(Weight::from_parts(534_402, 0).saturating_mul(s.into()))
 			.saturating_add(RocksDbWeight::get().reads(2_u64))
 			.saturating_add(RocksDbWeight::get().writes(2_u64))
 	}
+	/// Storage: `Scheduler::Retries` (r:1 w:2)
+	/// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Agenda` (r:1 w:1)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Lookup` (r:0 w:1)
+	/// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+	/// The range of component `s` is `[1, 512]`.
+	fn schedule_retry(s: u32, ) -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `159`
+		// Estimated: `110487`
+		// Minimum execution time: 14_155_000 picoseconds.
+		Weight::from_parts(16_447_031, 110487)
+			// Standard Error: 233
+			.saturating_add(Weight::from_parts(8_424, 0).saturating_mul(s.into()))
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+			.saturating_add(RocksDbWeight::get().writes(4_u64))
+	}
+	/// Storage: `Scheduler::Agenda` (r:1 w:0)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Retries` (r:0 w:1)
+	/// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+	fn set_retry() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `81 + s * (177 ±0)`
+		// Estimated: `110487`
+		// Minimum execution time: 8_130_000 picoseconds.
+		Weight::from_parts(9_047_554, 110487)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	/// Storage: `Scheduler::Lookup` (r:1 w:0)
+	/// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Agenda` (r:1 w:0)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Retries` (r:0 w:1)
+	/// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+	fn set_retry_named() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `647 + s * (178 ±0)`
+		// Estimated: `110487`
+		// Minimum execution time: 10_838_000 picoseconds.
+		Weight::from_parts(12_804_076, 110487)
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	/// Storage: `Scheduler::Agenda` (r:1 w:0)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Retries` (r:0 w:1)
+	/// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+	fn cancel_retry() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `81 + s * (177 ±0)`
+		// Estimated: `110487`
+		// Minimum execution time: 8_130_000 picoseconds.
+		Weight::from_parts(9_047_554, 110487)
+			.saturating_add(RocksDbWeight::get().reads(1_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
+	/// Storage: `Scheduler::Lookup` (r:1 w:0)
+	/// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Agenda` (r:1 w:0)
+	/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(107022), added: 109497, mode: `MaxEncodedLen`)
+	/// Storage: `Scheduler::Retries` (r:0 w:1)
+	/// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
+	fn cancel_retry_named() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `647 + s * (178 ±0)`
+		// Estimated: `110487`
+		// Minimum execution time: 10_838_000 picoseconds.
+		Weight::from_parts(12_804_076, 110487)
+			.saturating_add(RocksDbWeight::get().reads(2_u64))
+			.saturating_add(RocksDbWeight::get().writes(1_u64))
+	}
 }
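
Editor's note on how these figures are consumed (an illustrative sketch, not part of the generated diff): a runtime points `pallet_scheduler` at one of these tables through the pallet's `WeightInfo` associated type, and the pallet then charges, for example, `WeightInfo::cancel(s)` when cancelling a task in an agenda that currently holds `s` entries. The snippet below simply re-evaluates the benchmarked `cancel` formula from this file at its upper bound (s = 512) to show how ref-time, proof size and database weight compose. Using `RocksDbWeight` is an assumption standing in for whatever `T::DbWeight` a concrete runtime configures, and the snippet only needs the `frame-support` crate as a dependency.

use frame_support::{
	traits::Get,
	weights::{constants::RocksDbWeight, Weight},
};

/// Editor's illustration: the `cancel(s)` weight formula from this file,
/// expressed as a standalone function. Ref-time is in picoseconds and
/// proof size in bytes.
fn cancel_weight(s: u32) -> Weight {
	Weight::from_parts(15_141_696, 110487)
		// Slope fitted by the benchmark: ~0.53 µs of ref-time per agenda entry.
		.saturating_add(Weight::from_parts(533_390, 0).saturating_mul(s.into()))
		// One read of `Scheduler::Agenda`, plus writes to `Agenda` and `Lookup`.
		.saturating_add(RocksDbWeight::get().reads(1_u64))
		.saturating_add(RocksDbWeight::get().writes(2_u64))
}

fn main() {
	let w = cancel_weight(512);
	// 15_141_696 + 512 * 533_390 = 288_237_376 ps (~0.29 ms) of ref-time
	// before the database read/write costs are added on top.
	println!(
		"cancel(512): ref_time = {} ps, proof_size = {} bytes",
		w.ref_time(),
		w.proof_size()
	);
}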