Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/scripts/cmd/cmd.py
Original file line number Diff line number Diff line change
Expand Up @@ -462,6 +462,7 @@ def main():
f"--no-storage-info --no-min-squares --no-median-slopes " \
f"{config['bench_flags']}"
print(f'-- Running: {cmd} \n')
os.environ['RUNTIME_LOG'] = 'off' # Disable runtime logging to reduce noisy output during benchmarking
status = os.system(cmd)

if status != 0 and args.fail_fast:
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/check-frame-omni-bencher.yml
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ jobs:
id: required
run: |
forklift cargo build --locked --quiet --release -p asset-hub-westend-runtime --features runtime-benchmarks
forklift cargo run --locked --release -p frame-omni-bencher --quiet -- v1 benchmark pallet --runtime target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.compact.compressed.wasm --all --steps 2 --repeat 1 --quiet
forklift cargo run --locked --release -p frame-omni-bencher --quiet -- v1 benchmark pallet --runtime target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.compact.compressed.wasm --all --steps 2 --repeat 1 --quiet --min-duration 0
- name: Stop all workflows if failed
if: ${{ failure() && steps.required.conclusion == 'failure' && !github.event.pull_request.head.repo.fork }}
uses: ./.github/actions/workflow-stopper
Expand Down Expand Up @@ -102,7 +102,7 @@ jobs:
ls -lrt $RUNTIME_BLOB_PATH

if [[ "$BENCH_CMD" == "pallet" ]]; then
cmd="./target/release/frame-omni-bencher v1 benchmark pallet --runtime $RUNTIME_BLOB_PATH --all --steps 2 --repeat 1 $FLAGS"
cmd="./target/release/frame-omni-bencher v1 benchmark pallet --runtime $RUNTIME_BLOB_PATH --all --steps 2 --repeat 1 --min-duration 0 $FLAGS"
elif [[ "$BENCH_CMD" == "overhead" ]]; then
cmd="./target/release/frame-omni-bencher v1 benchmark overhead --runtime $RUNTIME_BLOB_PATH"
else
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ jobs:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: script
run: forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks --quiet -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet
run: forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks --quiet -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --min-duration 0 --quiet

# cf https://github.com/paritytech/polkadot-sdk/issues/1652
test-syscalls:
Expand Down

Large diffs are not rendered by default.

11 changes: 11 additions & 0 deletions prdoc/pr_10794.prdoc
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
title: '[FRAME] Omni bencher runs each benchmark for at least 10 seconds'
doc:
- audience: Runtime Dev
description: |-
- Ensure that every benchmark runs for at least 10 seconds. The threshold is configurable with `--min-duration <seconds>`.
- Turn off runtime logging in the bench bot to reduce spam in the log output.
crates:
- name: frame-benchmarking-cli
bump: major
- name: asset-hub-westend-runtime
bump: patch
175 changes: 130 additions & 45 deletions substrate/utils/frame/benchmarking-cli/src/pallet/command.rs
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ use std::{
fs,
str::FromStr,
time,
time::Duration,
};

type SubstrateAndExtraHF<T> = (
Expand Down Expand Up @@ -350,7 +351,7 @@ impl PalletCmd {
// Run the benchmarks
let mut batches = Vec::new();
let mut batches_db = Vec::new();
let mut timer = time::SystemTime::now();
let mut progress_timer = time::Instant::now();
// Maps (pallet, extrinsic) to its component ranges.
let mut component_ranges = HashMap::<(String, String), Vec<ComponentRange>>::new();
let pov_modes =
Expand Down Expand Up @@ -412,30 +413,103 @@ impl PalletCmd {
all_components
};

for (s, selected_components) in all_components.iter().enumerate() {
let params = |verify: bool, repeats: u32| -> Vec<u8> {
if benchmark_api_version >= 2 {
(
pallet.as_bytes(),
instance.as_bytes(),
extrinsic.as_bytes(),
&selected_components.clone(),
verify,
repeats,
)
.encode()
} else {
(
pallet.as_bytes(),
extrinsic.as_bytes(),
&selected_components.clone(),
verify,
repeats,
)
.encode()
}
};
// Ensure each benchmark runs for at least its minimum duration.
let start = time::Instant::now();
let mut first = true;

loop {
for (s, selected_components) in all_components.iter().enumerate() {
let params = |verify: bool, repeats: u32| -> Vec<u8> {
if benchmark_api_version >= 2 {
(
pallet.as_bytes(),
instance.as_bytes(),
extrinsic.as_bytes(),
&selected_components.clone(),
verify,
repeats,
)
.encode()
} else {
(
pallet.as_bytes(),
extrinsic.as_bytes(),
&selected_components.clone(),
verify,
repeats,
)
.encode()
}
};

// Maybe run a verification if we are the first iteration.
if !self.no_verify && first {
let state = &state_without_tracking;
// Don't use these results since verification code will add overhead.
let _batch: Vec<BenchmarkBatch> = match Self::exec_state_machine::<
std::result::Result<Vec<BenchmarkBatch>, String>,
_,
_,
>(
StateMachine::new(
state,
&mut Default::default(),
&executor,
"Benchmark_dispatch_benchmark",
&params(true, 1),
&mut Self::build_extensions(executor.clone(), state.recorder()),
&runtime_code,
CallContext::Offchain,
),
"dispatch a benchmark",
) {
Err(e) => {
log::error!(target: LOG_TARGET, "Benchmark {pallet}::{extrinsic} failed: {e}");
failed.push((pallet.clone(), extrinsic.clone()));
continue 'outer;
},
Ok(Err(e)) => {
log::error!(target: LOG_TARGET, "Benchmark {pallet}::{extrinsic} failed: {e}");
failed.push((pallet.clone(), extrinsic.clone()));
continue 'outer;
},
Ok(Ok(b)) => b,
};
}
// Do one loop of DB tracking.
{
let state = &state_with_tracking;
let batch: Vec<BenchmarkBatch> = match Self::exec_state_machine::<
std::result::Result<Vec<BenchmarkBatch>, String>,
_,
_,
>(
StateMachine::new(
state,
&mut Default::default(),
&executor,
"Benchmark_dispatch_benchmark",
&params(false, 1),
&mut Self::build_extensions(executor.clone(), state.recorder()),
&runtime_code,
CallContext::Offchain,
),
"dispatch a benchmark",
) {
Err(e) => {
log::error!(target: LOG_TARGET, "Benchmark {pallet}::{extrinsic} failed: {e}");
failed.push((pallet.clone(), extrinsic.clone()));
continue 'outer;
},
Ok(Err(e)) => {
log::error!(target: LOG_TARGET, "Benchmark {pallet}::{extrinsic} failed: {e}");
failed.push((pallet.clone(), extrinsic.clone()));
continue 'outer;
},
Ok(Ok(b)) => b,
};

<<<<<<< HEAD
// First we run a verification
if !self.no_verify {
let state = &state_without_tracking;
Expand Down Expand Up @@ -502,19 +576,20 @@ impl PalletCmd {
},
Ok(Ok(b)) => b,
};
=======
batches_db.extend(batch);
}
>>>>>>> 822c6f6f ([FRAME] Omni bencher run each benchmark at least 10 secs (#10794))

batches_db.extend(batch);
}
// Finally run a bunch of loops to get extrinsic timing information.
for r in 0..self.external_repeat {
// Finally run a bunch of loops to get extrinsic timing information.
let state = &state_without_tracking;
let batch = match Self::exec_state_machine::<
std::result::Result<Vec<BenchmarkBatch>, String>,
_,
_,
>(
StateMachine::new(
state, // todo remove tracking
state,
&mut Default::default(),
&executor,
"Benchmark_dispatch_benchmark",
Expand All @@ -540,29 +615,35 @@ impl PalletCmd {

batches.extend(batch);

// Show progress information
if let Ok(elapsed) = timer.elapsed() {
if elapsed >= time::Duration::from_secs(5) {
timer = time::SystemTime::now();
// Show progress information at most every 5 seconds.
if progress_timer.elapsed() >= time::Duration::from_secs(5) {
progress_timer = time::Instant::now();

log::info!(
target: LOG_TARGET,
"[{: >3} % ] Running benchmark: {pallet}::{extrinsic}({} args) {}/{} {}/{}",
(i * 100) / benchmarks_to_run.len(),
components.len(),
let msg = if first {
format!(
"{}/{}",
s + 1, // s starts at 0.
all_components.len(),
r + 1,
self.external_repeat,
);
}
all_components.len()
)
} else {
"(overtime)".to_string()
};

log::info!(
target: LOG_TARGET,
"[{: >3} % ] Running benchmark: {pallet}::{extrinsic} {msg}",
(i * 100) / benchmarks_to_run.len()
);
}
}

first = false;
if start.elapsed() >= Duration::from_secs(self.min_duration) {
break;
}
}
}

assert!(batches_db.len() == batches.len() / self.external_repeat as usize);

if !failed.is_empty() {
failed.sort();
eprintln!(
Expand Down Expand Up @@ -1019,6 +1100,10 @@ impl PalletCmd {
unreachable!("Clap should not allow both `--runtime` and `--chain` to be provided.")
}

if self.external_repeat.is_some() {
log::warn!(target: LOG_TARGET, "The `--external-repeat` argument is deprecated and will be removed in a future release.");
}

if chain_spec.is_none() && self.runtime.is_none() && self.shared_params.chain.is_none() {
return Err((
ErrorKind::MissingRequiredArgument,
Expand Down
16 changes: 11 additions & 5 deletions substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -87,15 +87,21 @@ pub struct PalletCmd {
#[arg(long = "high", value_delimiter = ',')]
pub highest_range_values: Vec<u32>,

/// Select how many repetitions of this benchmark should run from within the wasm.
/// Minimum number of repetitions of this benchmark that should run from within the wasm.
///
/// It may run more often than this to reach its `min_duration`.
#[arg(short, long, default_value_t = 20)]
pub repeat: u32,

/// Select how many repetitions of this benchmark should run from the client.
/// DEPRECATED: This argument will be removed in a future release; please remove any usage of it.
#[arg(long)]
pub external_repeat: Option<u32>,

/// Minimum duration in seconds for each benchmark.
///
/// NOTE: Using this alone may give slower results, but will afford you maximum Wasm memory.
#[arg(long, default_value_t = 1)]
pub external_repeat: u32,
/// Can be set to 0 to disable the feature and solely rely on the `repeat` parameter.
#[arg(long, default_value_t = 10)]
pub min_duration: u64,

/// Print the raw results in JSON format.
#[arg(long = "json")]
Expand Down
Loading