diff --git a/Cargo.lock b/Cargo.lock index 29d6b6ed5abb9..9c2477b01573e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -20648,6 +20648,7 @@ dependencies = [ "linked-hash-map", "parity-scale-codec", "parking_lot 0.12.3", + "rstest", "sc-block-builder", "sc-client-api", "sc-transaction-pool-api", diff --git a/prdoc/pr_8596.prdoc b/prdoc/pr_8596.prdoc new file mode 100644 index 0000000000000..105e375cee14b --- /dev/null +++ b/prdoc/pr_8596.prdoc @@ -0,0 +1,8 @@ +title: '`fatxpool`: limits handling optimizations and fixes' +doc: +- audience: Node Dev + description: |- + This PR adds some optimization and fixes in handling limits in fork-aware transaction pool. +crates: +- name: sc-transaction-pool + bump: major diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml index dea881ccef547..b51ceb45d0986 100644 --- a/substrate/client/transaction-pool/Cargo.toml +++ b/substrate/client/transaction-pool/Cargo.toml @@ -49,6 +49,7 @@ tracing = { workspace = true, default-features = true } anyhow = { workspace = true } assert_matches = { workspace = true } criterion = { workspace = true, default-features = true } +rstest = { workspace = true } sc-block-builder = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } substrate-test-runtime = { workspace = true } diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs index 74dd69a8aaf8d..0672d47f70431 100644 --- a/substrate/client/transaction-pool/benches/basics.rs +++ b/substrate/client/transaction-pool/benches/basics.rs @@ -64,8 +64,9 @@ impl ChainApi for TestApi { fn validate_transaction( &self, at: ::Hash, - _source: TransactionSource, + _: TransactionSource, uxt: Arc<::Extrinsic>, + _: ValidateTransactionPriority, ) -> Self::ValidationFuture { let uxt = (*uxt).clone(); let transfer = TransferData::try_from(&uxt) diff --git 
a/substrate/client/transaction-pool/src/common/api.rs b/substrate/client/transaction-pool/src/common/api.rs index 14212a51a8558..b428582a01803 100644 --- a/substrate/client/transaction-pool/src/common/api.rs +++ b/substrate/client/transaction-pool/src/common/api.rs @@ -18,16 +18,13 @@ //! Chain api required for the transaction pool. -use crate::LOG_TARGET; -use codec::Encode; -use futures::{ - channel::{mpsc, oneshot}, - future::{ready, Future, FutureExt, Ready}, - lock::Mutex, - SinkExt, StreamExt, +use crate::{ + common::{sliding_stat::DurationSlidingStats, STAT_SLIDING_WINDOW}, + graph::ValidateTransactionPriority, + insert_and_log_throttled, LOG_TARGET, LOG_TARGET_STAT, }; -use std::{marker::PhantomData, pin::Pin, sync::Arc}; - +use codec::Encode; +use futures::future::{ready, Future, FutureExt, Ready}; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_client_api::{blockchain::HeaderBackend, BlockBackend}; use sp_api::{ApiExt, ProvideRuntimeApi}; @@ -39,38 +36,85 @@ use sp_runtime::{ transaction_validity::{TransactionSource, TransactionValidity}, }; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; +use std::{ + marker::PhantomData, + pin::Pin, + sync::Arc, + time::{Duration, Instant}, +}; +use tokio::sync::{mpsc, oneshot, Mutex}; use super::{ error::{self, Error}, metrics::{ApiMetrics, ApiMetricsExt}, }; use crate::graph; -use tracing::{trace, warn}; +use tracing::{trace, warn, Level}; /// The transaction pool logic for full client. pub struct FullChainApi { client: Arc, _marker: PhantomData, metrics: Option>, - validation_pool: mpsc::Sender + Send>>>, + validation_pool_normal: mpsc::Sender + Send>>>, + validation_pool_maintained: mpsc::Sender + Send>>>, + validate_transaction_normal_stats: DurationSlidingStats, + validate_transaction_maintained_stats: DurationSlidingStats, } /// Spawn a validation task that will be used by the transaction pool to validate transactions. 
fn spawn_validation_pool_task( name: &'static str, - receiver: Arc + Send>>>>>, + receiver_normal: Arc + Send>>>>>, + receiver_maintained: Arc + Send>>>>>, spawner: &impl SpawnEssentialNamed, + stats: DurationSlidingStats, + blocking_stats: DurationSlidingStats, ) { spawner.spawn_essential_blocking( name, Some("transaction-pool"), async move { loop { - let task = receiver.lock().await.next().await; - match task { - None => return, - Some(task) => task.await, - } + let start = Instant::now(); + + let task = { + let receiver_maintained = receiver_maintained.clone(); + let receiver_normal = receiver_normal.clone(); + tokio::select! { + Some(task) = async { + receiver_maintained.lock().await.recv().await + } => { task } + Some(task) = async { + receiver_normal.lock().await.recv().await + } => { task } + else => { + return + } + } + }; + + let blocking_duration = { + let start = Instant::now(); + task.await; + start.elapsed() + }; + + insert_and_log_throttled!( + Level::DEBUG, + target:LOG_TARGET_STAT, + prefix:format!("validate_transaction_inner_stats"), + stats, + start.elapsed().into() + ); + insert_and_log_throttled!( + Level::DEBUG, + target:LOG_TARGET_STAT, + prefix:format!("validate_transaction_blocking_stats"), + blocking_stats, + blocking_duration.into() + ); + trace!(target:LOG_TARGET, duration=?start.elapsed(), "spawn_validation_pool_task"); } } .boxed(), @@ -84,6 +128,9 @@ impl FullChainApi { prometheus: Option<&PrometheusRegistry>, spawner: &impl SpawnEssentialNamed, ) -> Self { + let stats = DurationSlidingStats::new(Duration::from_secs(STAT_SLIDING_WINDOW)); + let blocking_stats = DurationSlidingStats::new(Duration::from_secs(STAT_SLIDING_WINDOW)); + let metrics = prometheus.map(ApiMetrics::register).and_then(|r| match r { Err(error) => { warn!( @@ -96,13 +143,41 @@ impl FullChainApi { Ok(api) => Some(Arc::new(api)), }); - let (sender, receiver) = mpsc::channel(0); + let (sender, receiver) = mpsc::channel(1); + let (sender_maintained, receiver_maintained) 
= mpsc::channel(1); let receiver = Arc::new(Mutex::new(receiver)); - spawn_validation_pool_task("transaction-pool-task-0", receiver.clone(), spawner); - spawn_validation_pool_task("transaction-pool-task-1", receiver, spawner); - - FullChainApi { client, validation_pool: sender, _marker: Default::default(), metrics } + let receiver_maintained = Arc::new(Mutex::new(receiver_maintained)); + spawn_validation_pool_task( + "transaction-pool-task-0", + receiver.clone(), + receiver_maintained.clone(), + spawner, + stats.clone(), + blocking_stats.clone(), + ); + spawn_validation_pool_task( + "transaction-pool-task-1", + receiver, + receiver_maintained, + spawner, + stats.clone(), + blocking_stats.clone(), + ); + + FullChainApi { + client, + validation_pool_normal: sender, + validation_pool_maintained: sender_maintained, + _marker: Default::default(), + metrics, + validate_transaction_normal_stats: DurationSlidingStats::new(Duration::from_secs( + STAT_SLIDING_WINDOW, + )), + validate_transaction_maintained_stats: DurationSlidingStats::new(Duration::from_secs( + STAT_SLIDING_WINDOW, + )), + } } } @@ -132,10 +207,25 @@ where at: ::Hash, source: TransactionSource, uxt: graph::ExtrinsicFor, + validation_priority: ValidateTransactionPriority, ) -> Self::ValidationFuture { + let start = Instant::now(); let (tx, rx) = oneshot::channel(); let client = self.client.clone(); - let mut validation_pool = self.validation_pool.clone(); + let (stats, validation_pool, prefix) = + if validation_priority == ValidateTransactionPriority::Maintained { + ( + self.validate_transaction_maintained_stats.clone(), + self.validation_pool_maintained.clone(), + "validate_transaction_maintained_stats", + ) + } else { + ( + self.validate_transaction_normal_stats.clone(), + self.validation_pool_normal.clone(), + "validate_transaction_stats", + ) + }; let metrics = self.metrics.clone(); async move { @@ -155,10 +245,20 @@ where .map_err(|e| Error::RuntimeApi(format!("Validation pool down: {:?}", e)))?; } - 
match rx.await { + let validity = match rx.await { Ok(r) => r, Err(_) => Err(Error::RuntimeApi("Validation was canceled".into())), - } + }; + + insert_and_log_throttled!( + Level::DEBUG, + target:LOG_TARGET_STAT, + prefix:prefix, + stats, + start.elapsed().into() + ); + + validity } .boxed() } diff --git a/substrate/client/transaction-pool/src/common/mod.rs b/substrate/client/transaction-pool/src/common/mod.rs index d35f6c2fea0e7..7e7711682da16 100644 --- a/substrate/client/transaction-pool/src/common/mod.rs +++ b/substrate/client/transaction-pool/src/common/mod.rs @@ -22,6 +22,7 @@ pub(crate) mod api; pub(crate) mod enactment_state; pub(crate) mod error; pub(crate) mod metrics; +pub(crate) mod sliding_stat; #[cfg(test)] pub(crate) mod tests; pub(crate) mod tracing_log_xt; @@ -29,6 +30,9 @@ pub(crate) mod tracing_log_xt; use futures::StreamExt; use std::sync::Arc; +/// Stat sliding window, in seconds for per-transaction activities. +pub(crate) const STAT_SLIDING_WINDOW: u64 = 3; + /// Inform the transaction pool about imported and finalized blocks. pub async fn notification_future(client: Arc, txpool: Arc) where diff --git a/substrate/client/transaction-pool/src/common/sliding_stat.rs b/substrate/client/transaction-pool/src/common/sliding_stat.rs new file mode 100644 index 0000000000000..3ac7681b6f0cb --- /dev/null +++ b/substrate/client/transaction-pool/src/common/sliding_stat.rs @@ -0,0 +1,617 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Logging helper. Sliding window statistics with retention-based pruning. +//! +//! `SlidingStats` tracks timestamped values and computes statistical summaries +//! (min, max, average, percentiles, count) over a rolling time window. +//! +//! Old entries are automatically pruned based on a configurable retention `Duration`. +//! Values can be logged periodically using `insert_with_log` or the `insert_and_log_throttled!` +//! macro. + +use std::{ + collections::{BTreeSet, HashMap, VecDeque}, + fmt::Display, + sync::Arc, + time::{Duration, Instant}, +}; +use tokio::sync::RwLock; + +mod sealed { + pub trait HasDefaultStatFormatter {} +} + +impl sealed::HasDefaultStatFormatter for u32 {} +impl sealed::HasDefaultStatFormatter for i64 {} + +pub trait StatFormatter { + fn format_stat(value: f64) -> String; +} + +impl StatFormatter for T +where + T: Display + sealed::HasDefaultStatFormatter, +{ + fn format_stat(value: f64) -> String { + format!("{value:.2}") + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub struct StatDuration(pub std::time::Duration); + +impl Into for StatDuration { + fn into(self) -> f64 { + self.0.as_secs_f64() + } +} + +impl Into for Duration { + fn into(self) -> StatDuration { + StatDuration(self) + } +} + +impl std::fmt::Display for StatDuration { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl StatFormatter for StatDuration { + fn format_stat(value: f64) -> String { + format!("{:?}", Duration::from_secs_f64(value)) + } +} + +/// Sliding window statistics collector. 
+/// +/// `SlidingStats` maintains a rolling buffer of values with timestamps, +/// automatically pruning values older than the configured `retention` period. +/// It provides percentile queries (e.g., p50, p95), min/max, average, and count. +pub struct SlidingStats { + inner: Arc>>, +} + +/// Sync version of `SlidingStats` +pub struct SyncSlidingStats { + inner: Arc>>, +} + +/// A type alias for `SlidingStats` specialized for durations with human-readable formatting. +/// +/// Wraps `std::time::Duration` values using `StatDuration`, allowing for statistical summaries +/// (e.g. p50, p95, average) to be displayed in units like nanoseconds, milliseconds, or seconds. +pub type DurationSlidingStats = SlidingStats; + +/// Sync version of `DurationSlidingStats` +pub type SyncDurationSlidingStats = SyncSlidingStats; + +/// Internal state of the statistics buffer. +pub struct Inner { + /// How long to retain items after insertion. + retention: Duration, + + /// Counter to assign unique ids to each entry. + next_id: usize, + + /// Maps id to actual value + timestamp. + entries: HashMap>, + + /// Queue of IDs in insertion order for expiration. + by_time: VecDeque, + + /// Set of values with ids, ordered by value. + by_value: BTreeSet<(T, usize)>, + + /// The time stamp of most recent insertion with log. + /// + /// Used to throttle debug messages. 
+ last_log: Option, +} + +impl Default for Inner { + fn default() -> Self { + Self { + retention: Default::default(), + next_id: Default::default(), + entries: Default::default(), + by_time: Default::default(), + by_value: Default::default(), + last_log: None, + } + } +} + +impl Display for Inner +where + T: Ord + Copy + Into + std::fmt::Display + StatFormatter, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut parts = Vec::new(); + + parts.push(format!("count={}", self.count())); + if let Some(min) = self.min() { + parts.push(format!("min={}", min)); + } + if let Some(max) = self.max() { + parts.push(format!("max={}", max)); + } + if let Some(avg) = self.avg() { + parts.push(format!("avg={}", ::format_stat(avg))); + } + + for p in [50, 90, 95, 99] { + let val = self.percentile(p); + if val.is_finite() { + parts.push(format!("p{}={}", p, ::format_stat(val))); + } + } + parts.push(format!("span={:?}", self.retention)); + write!(f, "{}", parts.join(", ")) + } +} + +/// A value inserted into the buffer, along with its insertion time. +#[derive(Clone, Copy)] +struct Entry { + timestamp: Instant, + value: T, +} + +impl SlidingStats +where + T: Ord + Copy, +{ + /// Creates a new `SlidingStats` with the given retention duration. + pub fn new(retention: Duration) -> Self { + Self { inner: Arc::new(RwLock::new(Inner { retention, ..Default::default() })) } + } + + /// Inserts a value into the buffer, timestamped with `Instant::now()`. + /// + /// May trigger pruning of old items. + #[cfg(test)] + pub async fn insert(&self, value: T) { + self.inner.write().await.insert(value) + } + + /// Inserts a value into the buffer with provided timestamp. + /// + /// May trigger pruning of old items. 
+ #[cfg(test)] + pub async fn insert_using_timestamp(&self, value: T, now: Instant) { + self.inner.write().await.insert_using_timestamp(value, now) + } + + #[cfg(test)] + pub async fn len(&self) -> usize { + self.inner.read().await.len() + } + + /// Grants temporary read-only access to the locked inner structure, + /// passing it into the provided closure. + /// + /// Intended to dump stats and prune inner based on current timestamp. + #[cfg(test)] + pub async fn with_inner(&self, f: impl FnOnce(&mut Inner) -> R) -> R { + let mut guard = self.inner.write().await; + f(&mut *guard) + } +} + +impl SyncSlidingStats +where + T: Ord + Copy, +{ + /// Creates a new `SlidingStats` with the given retention duration. + pub fn new(retention: Duration) -> Self { + Self { + inner: Arc::new(parking_lot::RwLock::new(Inner { retention, ..Default::default() })), + } + } +} + +impl SlidingStats +where + T: Ord + Copy + Into + std::fmt::Display + StatFormatter, +{ + /// Inserts a value and optionally returns a formatted log string of the current stats. + /// + /// If enough time has passed since the last log (determined by `log_interval` or retention), + /// this method returns `Some(log_string)`, otherwise it returns `None`. + /// + /// This method performs: + /// - Automatic pruning of expired entries + /// - Throttling via `last_log` timestamp + /// + /// Note: The newly inserted value may not be included in the returned summary. 
+ pub async fn insert_with_log( + &self, + value: T, + log_interval: Option, + now: Instant, + ) -> Option { + let mut inner = self.inner.write().await; + inner.insert_with_log(value, log_interval, now) + } +} + +impl SyncSlidingStats +where + T: Ord + Copy + Into + std::fmt::Display + StatFormatter, +{ + pub fn insert_with_log( + &self, + value: T, + log_interval: Option, + now: Instant, + ) -> Option { + let mut inner = self.inner.write(); + inner.insert_with_log(value, log_interval, now) + } +} + +impl Inner +where + T: Ord + Copy, +{ + #[cfg(test)] + fn insert(&mut self, value: T) { + self.insert_using_timestamp(value, Instant::now()) + } + + /// Refer to [`SlidingStats::insert_using_timestamp`] + fn insert_using_timestamp(&mut self, value: T, now: Instant) { + let id = self.next_id; + self.next_id += 1; + + let entry = Entry { timestamp: now, value }; + + self.entries.insert(id, entry); + self.by_time.push_back(id); + self.by_value.insert((value, id)); + + self.prune(now); + } + + /// Returns the minimum value in the current window. + pub fn min(&self) -> Option { + self.by_value.first().map(|(v, _)| *v) + } + + /// Returns the maximum value in the current window. + pub fn max(&self) -> Option { + self.by_value.last().map(|(v, _)| *v) + } + + /// Returns the number of items currently retained. + pub fn count(&self) -> usize { + self.len() + } + + /// Explicitly prunes expired items from the buffer. + /// + /// This is also called automatically during insertions. 
+ pub fn prune(&mut self, now: Instant) { + let cutoff = now - self.retention; + + while let Some(&oldest_id) = self.by_time.front() { + let expired = match self.entries.get(&oldest_id) { + Some(entry) => entry.timestamp < cutoff, + None => { + debug_assert!(false); + true + }, + }; + + if !expired { + break; + } + + if let Some(entry) = self.entries.remove(&oldest_id) { + self.by_value.remove(&(entry.value, oldest_id)); + } else { + debug_assert!(false); + } + self.by_time.pop_front(); + } + } + + pub fn len(&self) -> usize { + debug_assert_eq!(self.entries.len(), self.by_time.len()); + debug_assert_eq!(self.entries.len(), self.by_value.len()); + self.entries.len() + } +} + +impl Inner +where + T: Ord + Copy + Into, +{ + /// Returns the average (mean) of values in the current window. + pub fn avg(&self) -> Option { + let len = self.len(); + if len == 0 { + None + } else { + Some(self.entries.values().map(|e| e.value.into()).sum::() / len as f64) + } + } + + /// Returns the value at the given percentile (e.g., 0.5 for p50). + /// + /// Returns `None` if the buffer is empty. 
+ // note: copied from: https://docs.rs/statrs/0.18.0/src/statrs/statistics/slice_statistics.rs.html#164-182 + pub fn percentile(&self, percentile: usize) -> f64 { + if self.len() == 0 || percentile > 100 { + return f64::NAN; + } + + let tau = percentile as f64 / 100.0; + let len = self.len(); + + let h = (len as f64 + 1.0 / 3.0) * tau + 1.0 / 3.0; + let hf = h as i64; + + if hf <= 0 || percentile == 0 { + return self.min().map(|v| v.into()).unwrap_or(f64::NAN); + } + + if hf >= len as i64 || percentile == 100 { + return self.max().map(|v| v.into()).unwrap_or(f64::NAN); + } + + let mut iter = self.by_value.iter().map(|(v, _)| (*v).into()); + + let a = iter.nth((hf as usize).saturating_sub(1)).unwrap_or(f64::NAN); + let b = iter.next().unwrap_or(f64::NAN); + + a + (h - hf as f64) * (b - a) + } +} + +impl Inner +where + T: Ord + Copy + Into + std::fmt::Display + StatFormatter, +{ + /// Refer to [`SlidingStats::insert_with_log`] + pub fn insert_with_log( + &mut self, + value: T, + log_interval: Option, + now: Instant, + ) -> Option { + let Some(last_log) = self.last_log else { + self.last_log = Some(now); + self.insert_using_timestamp(value, now); + return None; + }; + + let log_interval = log_interval.unwrap_or(self.retention); + let should_log = now.duration_since(last_log) >= log_interval; + let result = should_log.then(|| { + self.last_log = Some(now); + format!("{self}") + }); + self.insert_using_timestamp(value, now); + result + } +} + +impl Clone for SlidingStats { + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} + +impl Clone for SyncSlidingStats { + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} + +/// Inserts a value into a `SlidingStats` and conditionally logs the current stats using `tracing`. +/// +/// This macro inserts the given `$value` into the `$stats` collector only if tracing is enabled +/// for the given `$target` and `$level`. 
The log will be emitted only if enough time has passed +/// since the last logged output (as tracked by the internal last_log timestamp). +/// +/// The macro respects throttling: stats will not be logged more frequently than either the +/// explicitly provided `log_interval` or the stats' retention period (if no interval is given). +/// +/// Note that: +/// - Logging is skipped unless `tracing::enabled!` returns true for the target and level. +/// - All entries older than the retention period will be logged and pruned, +/// - The newly inserted value may not be included in the logged statistics output (it is inserted +/// *after* the log decision). +#[macro_export] +macro_rules! insert_and_log_throttled { + ( + $level:expr, + target: $target:expr, + log_interval: $log_interval:expr, + prefix: $prefix:expr, + $stats:expr, + $value:expr + ) => {{ + if tracing::enabled!(target: $target, $level) { + let now = Instant::now(); + if let Some(msg) = $stats.insert_with_log($value, Some($log_interval), now).await { + tracing::event!(target: $target, $level, "{}: {}", $prefix, msg); + } + } + }}; + + ( + $level:expr, + target: $target:expr, + prefix: $prefix:expr, + $stats:expr, + $value:expr + ) => {{ + if tracing::enabled!(target: $target, $level) { + let now = std::time::Instant::now(); + if let Some(msg) = $stats.insert_with_log($value, None, now).await { + tracing::event!(target: $target, $level, "{}: {}", $prefix, msg); + } + } + }}; +} + +/// Sync version of `insert_and_log_throttled` +#[macro_export] +macro_rules! 
insert_and_log_throttled_sync { + ( + $level:expr, + target: $target:literal, + prefix: $prefix:expr, + $stats:expr, + $value:expr + ) => {{ + if tracing::enabled!(target: $target, $level) { + let now = std::time::Instant::now(); + if let Some(msg) = $stats.insert_with_log($value, None, now){ + tracing::event!(target: $target, $level, "{}: {}", $prefix, msg); + } + } + }}; +} + +#[cfg(test)] +mod test { + use super::*; + use std::time::{Duration, Instant}; + + #[tokio::test] + async fn retention_prunes_old_items() { + let stats = SlidingStats::::new(Duration::from_secs(10)); + + let base = Instant::now(); + for i in 0..5 { + stats.insert_using_timestamp(i * 10, base + Duration::from_secs(i * 5)).await; + } + assert_eq!(stats.len().await, 3); + + stats.insert_using_timestamp(999, base + Duration::from_secs(26)).await; + + assert_eq!(stats.len().await, 2); + } + + #[tokio::test] + async fn retention_prunes_old_items2() { + let stats = SlidingStats::::new(Duration::from_secs(10)); + + let base = Instant::now(); + for i in 0..100 { + stats.insert_using_timestamp(i * 10, base + Duration::from_secs(5)).await; + } + assert_eq!(stats.len().await, 100); + + stats.insert_using_timestamp(999, base + Duration::from_secs(16)).await; + + let len = stats.len().await; + assert_eq!(len, 1); + } + + #[tokio::test] + async fn insert_with_log_message_contains_all_old_items() { + let stats = SlidingStats::::new(Duration::from_secs(100)); + + let base = Instant::now(); + for _ in 0..10 { + stats.insert_with_log(1, None, base + Duration::from_secs(5)).await; + } + assert_eq!(stats.len().await, 10); + + let output = stats.insert_with_log(1, None, base + Duration::from_secs(200)).await.unwrap(); + assert!(output.contains("count=10")); + + let len = stats.len().await; + assert_eq!(len, 1); + } + + #[tokio::test] + async fn insert_with_log_message_prunes_all_old_items() { + let stats = SlidingStats::::new(Duration::from_secs(25)); + + let base = Instant::now(); + for i in 0..10 { + 
stats.insert_with_log(1, None, base + Duration::from_secs(i * 5)).await; + } + assert_eq!(stats.len().await, 6); + + let output = stats.insert_with_log(1, None, base + Duration::from_secs(200)).await.unwrap(); + assert!(output.contains("count=6")); + + let len = stats.len().await; + assert_eq!(len, 1); + } + + #[tokio::test] + async fn test_avg_min_max() { + let stats = SlidingStats::::new(Duration::from_secs(100)); + let base = Instant::now(); + + stats.insert_using_timestamp(10, base).await; + stats.insert_using_timestamp(20, base + Duration::from_secs(1)).await; + stats.insert_using_timestamp(30, base + Duration::from_secs(2)).await; + + stats + .with_inner(|inner| { + assert_eq!(inner.count(), 3); + assert_eq!(inner.avg(), Some(20.0)); + assert_eq!(inner.min(), Some(10)); + assert_eq!(inner.max(), Some(30)); + }) + .await; + } + + #[tokio::test] + async fn duration_format() { + let stats = SlidingStats::::new(Duration::from_secs(100)); + stats.insert(Duration::from_nanos(100).into()).await; + let output = stats.with_inner(|i| format!("{i}")).await; + assert!(output.contains("max=100ns")); + + let stats = SlidingStats::::new(Duration::from_secs(100)); + stats.insert(Duration::from_micros(100).into()).await; + let output = stats.with_inner(|i| format!("{i}")).await; + assert!(output.contains("max=100µs")); + + let stats = SlidingStats::::new(Duration::from_secs(100)); + stats.insert(Duration::from_millis(100).into()).await; + let output = stats.with_inner(|i| format!("{i}")).await; + assert!(output.contains("max=100ms")); + + let stats = SlidingStats::::new(Duration::from_secs(100)); + stats.insert(Duration::from_secs(100).into()).await; + let output = stats.with_inner(|i| format!("{i}")).await; + assert!(output.contains("max=100s")); + + let stats = SlidingStats::::new(Duration::from_secs(100)); + stats.insert(Duration::from_nanos(100).into()).await; + stats.insert(Duration::from_micros(100).into()).await; + stats.insert(Duration::from_millis(100).into()).await; 
+ stats.insert(Duration::from_secs(100).into()).await; + let output = stats.with_inner(|i| format!("{i}")).await; + println!("{output}"); + assert_eq!(output, "count=4, min=100ns, max=100s, avg=25.025025025s, p50=50.05ms, p90=100s, p95=100s, p99=100s, span=100s"); + } +} diff --git a/substrate/client/transaction-pool/src/common/tests.rs b/substrate/client/transaction-pool/src/common/tests.rs index c391beb21b07f..71caf005333de 100644 --- a/substrate/client/transaction-pool/src/common/tests.rs +++ b/substrate/client/transaction-pool/src/common/tests.rs @@ -18,7 +18,10 @@ //! Testing related primitives for internal usage in this crate. -use crate::graph::{BlockHash, ChainApi, ExtrinsicFor, NumberFor, RawExtrinsicFor}; +use crate::{ + graph::{BlockHash, ChainApi, ExtrinsicFor, NumberFor, RawExtrinsicFor}, + ValidateTransactionPriority, +}; use codec::Encode; use parking_lot::Mutex; use sc_transaction_pool_api::error; @@ -78,6 +81,7 @@ impl ChainApi for TestApi { at: ::Hash, _source: TransactionSource, uxt: ExtrinsicFor, + _: ValidateTransactionPriority, ) -> Self::ValidationFuture { let uxt = (*uxt).clone(); self.validation_requests.lock().push(uxt.clone()); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index 3553465668e31..cc655db91f0f9 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -23,13 +23,17 @@ use super::{ import_notification_sink::MultiViewImportNotificationSink, metrics::{EventsMetricsCollector, MetricsLink as PrometheusMetrics}, multi_view_listener::MultiViewListener, - tx_mem_pool::{InsertionInfo, TxMemPool, TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER}, + tx_mem_pool::{InsertionInfo, TxMemPool}, view::View, view_store::ViewStore, }; use crate::{ api::FullChainApi, - common::tracing_log_xt::{log_xt_debug, 
log_xt_trace}, + common::{ + sliding_stat::DurationSlidingStats, + tracing_log_xt::{log_xt_debug, log_xt_trace}, + STAT_SLIDING_WINDOW, + }, enactment_state::{EnactmentAction, EnactmentState}, fork_aware_txpool::{ dropped_watcher::{DroppedReason, DroppedTransaction}, @@ -40,7 +44,8 @@ use crate::{ base_pool::{TimedTransactionSource, Transaction}, BlockHash, ExtrinsicFor, ExtrinsicHash, IsValidator, Options, RawExtrinsicFor, }, - ReadyIteratorFor, LOG_TARGET, + insert_and_log_throttled, ReadyIteratorFor, ValidateTransactionPriority, LOG_TARGET, + LOG_TARGET_STAT, }; use async_trait::async_trait; use futures::{ @@ -53,8 +58,8 @@ use parking_lot::Mutex; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_transaction_pool_api::{ error::Error as TxPoolApiError, ChainEvent, ImportNotificationStream, - MaintainedTransactionPool, PoolStatus, TransactionFor, TransactionPool, TransactionPriority, - TransactionSource, TransactionStatusStreamFor, TxHash, TxInvalidityReportMap, + MaintainedTransactionPool, PoolStatus, TransactionFor, TransactionPool, TransactionSource, + TransactionStatusStreamFor, TxHash, TxInvalidityReportMap, }; use sp_blockchain::{HashAndNumber, TreeRoute}; use sp_core::traits::SpawnEssentialNamed; @@ -68,16 +73,22 @@ use std::{ collections::{BTreeMap, HashMap, HashSet}, pin::Pin, sync::Arc, - time::Instant, + time::{Duration, Instant}, }; use tokio::select; -use tracing::{debug, info, trace, warn}; +use tracing::{debug, info, instrument, trace, warn, Level}; /// The maximum block height difference before considering a view or transaction as timed-out /// due to a finality stall. When the difference exceeds this threshold, elements are treated /// as stale and are subject to cleanup. const FINALITY_TIMEOUT_THRESHOLD: usize = 128; +/// The number of transactions that will be sent from the mempool to the newly created view during +/// the maintain process. +//todo [#8835]: better approach is needed - maybe time-budget approach? 
+//note: yap parachain block size. +const MEMPOOL_TO_VIEW_BATCH_SIZE: usize = 7_000; + /// Fork aware transaction pool task, that needs to be polled. pub type ForkAwareTxPoolTask = Pin + Send>>; @@ -112,10 +123,10 @@ where /// /// `ready_iterator` is a closure that generates the result data to be sent to the pollers. fn trigger(&mut self, at: Block::Hash, ready_iterator: impl Fn() -> T) { - trace!(target: LOG_TARGET, ?at, keys = ?self.pollers.keys(), "fatp::trigger"); + debug!(target: LOG_TARGET, ?at, keys = ?self.pollers.keys(), "fatp::trigger"); let Some(pollers) = self.pollers.remove(&at) else { return }; pollers.into_iter().for_each(|p| { - debug!(target: LOG_TARGET, "trigger ready signal at block {}", at); + debug!(target: LOG_TARGET, "fatp::trigger trigger ready signal at block {}", at); let _ = p.send(ready_iterator()); }); } @@ -183,6 +194,12 @@ where /// Intended to be used in the finality stall cleanups and also as a cache for all in-block /// transactions. included_transactions: Mutex, Vec>>>, + + /// Stats for submit call durations + submit_stats: DurationSlidingStats, + + /// Stats for submit_and_watch call durations + submit_and_watch_stats: DurationSlidingStats, } impl ForkAwareTxPool @@ -198,7 +215,7 @@ where best_block_hash: Block::Hash, finalized_hash: Block::Hash, finality_timeout_threshold: Option, - ) -> (Self, ForkAwareTxPoolTask) { + ) -> (Self, [ForkAwareTxPoolTask; 2]) { Self::new_test_with_limits( pool_api, best_block_hash, @@ -220,20 +237,21 @@ where future_limits: crate::PoolLimit, mempool_max_transactions_count: usize, finality_timeout_threshold: Option, - ) -> (Self, ForkAwareTxPoolTask) { + ) -> (Self, [ForkAwareTxPoolTask; 2]) { let (listener, listener_task) = MultiViewListener::new_with_worker(Default::default()); let listener = Arc::new(listener); let (import_notification_sink, import_notification_sink_task) = MultiViewImportNotificationSink::new_with_worker(); - let mempool = Arc::from(TxMemPool::new( + let (mempool, mempool_task) 
= TxMemPool::new( pool_api.clone(), listener.clone(), Default::default(), mempool_max_transactions_count, ready_limits.total_bytes + future_limits.total_bytes, - )); + ); + let mempool = Arc::from(mempool); let (dropped_stream_controller, dropped_stream) = MultiViewDroppedWatcherController::::new(); @@ -278,8 +296,12 @@ where finality_timeout_threshold: finality_timeout_threshold .unwrap_or(FINALITY_TIMEOUT_THRESHOLD), included_transactions: Default::default(), + submit_stats: DurationSlidingStats::new(Duration::from_secs(STAT_SLIDING_WINDOW)), + submit_and_watch_stats: DurationSlidingStats::new(Duration::from_secs( + STAT_SLIDING_WINDOW, + )), }, - combined_tasks, + [combined_tasks, mempool_task], ) } @@ -299,11 +321,13 @@ where ExtrinsicHash, >, ) { + let dropped_stats = DurationSlidingStats::new(Duration::from_secs(STAT_SLIDING_WINDOW)); loop { let Some(dropped) = dropped_stream.next().await else { debug!(target: LOG_TARGET, "fatp::dropped_monitor_task: terminated..."); break; }; + let start = Instant::now(); let tx_hash = dropped.tx_hash; trace!( target: LOG_TARGET, @@ -313,7 +337,7 @@ where ); match dropped.reason { DroppedReason::Usurped(new_tx_hash) => { - if let Some(new_tx) = mempool.get_by_hash(new_tx_hash) { + if let Some(new_tx) = mempool.get_by_hash(new_tx_hash).await { view_store.replace_transaction(new_tx.source(), new_tx.tx(), tx_hash).await; } else { trace!( @@ -328,9 +352,16 @@ where }, }; - mempool.remove_transactions(&[tx_hash]); + mempool.remove_transactions(&[tx_hash]).await; import_notification_sink.clean_notified_items(&[tx_hash]); view_store.listener.transaction_dropped(dropped); + insert_and_log_throttled!( + Level::DEBUG, + target:LOG_TARGET_STAT, + prefix:"dropped_stats", + dropped_stats, + start.elapsed().into() + ); } } @@ -361,13 +392,14 @@ where let (import_notification_sink, import_notification_sink_task) = MultiViewImportNotificationSink::new_with_worker(); - let mempool = Arc::from(TxMemPool::new( + let (mempool, 
blocking_mempool_task) = TxMemPool::new( pool_api.clone(), listener.clone(), metrics.clone(), - TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER * options.total_count(), + options.total_count(), options.ready.total_bytes + options.future.total_bytes, - )); + ); + let mempool = Arc::from(mempool); let (dropped_stream_controller, dropped_stream) = MultiViewDroppedWatcherController::::new(); @@ -393,6 +425,11 @@ where } .boxed(); spawner.spawn_essential("txpool-background", Some("transaction-pool"), combined_tasks); + spawner.spawn_essential_blocking( + "txpool-background", + Some("transaction-pool"), + blocking_mempool_task, + ); Self { mempool, @@ -411,6 +448,10 @@ where is_validator, finality_timeout_threshold: FINALITY_TIMEOUT_THRESHOLD, included_transactions: Default::default(), + submit_stats: DurationSlidingStats::new(Duration::from_secs(STAT_SLIDING_WINDOW)), + submit_and_watch_stats: DurationSlidingStats::new(Duration::from_secs( + STAT_SLIDING_WINDOW, + )), } } @@ -455,8 +496,8 @@ where /// Returns a number of unwatched and watched transactions in internal mempool. /// /// Intended for use in unit tests. - pub fn mempool_len(&self) -> (usize, usize) { - self.mempool.unwatched_and_watched_count() + pub async fn mempool_len(&self) -> (usize, usize) { + self.mempool.unwatched_and_watched_count().await } /// Returns a set of future transactions for given block hash. 
@@ -484,7 +525,7 @@ where pub async fn ready_at_light(&self, at: Block::Hash) -> ReadyIteratorFor { let start = Instant::now(); let api = self.api.clone(); - trace!( + debug!( target: LOG_TARGET, ?at, "fatp::ready_at_light" @@ -546,19 +587,20 @@ where to_be_removed = all_extrinsics.len(), after_count, duration = ?start.elapsed(), - "fatp::ready_at_light" + "fatp::ready_at_light -> light" ); Box::new(tmp_view.pool.validated_pool().ready()) - } else if let Some((most_recent_view, _)) = self - .view_store - .most_recent_view - .read() - .and_then(|at| self.view_store.get_view_at(at, true)) - { + } else if let Some(most_recent_view) = self.view_store.most_recent_view.read().clone() { // Fallback for the case when `at` is not on the already known fork. // Falls back to the most recent view, which may include txs which // are invalid or already included in the blocks but can still yield a // partially valid ready set, which is still better than including nothing. + debug!( + target: LOG_TARGET, + ?at, + duration = ?start.elapsed(), + "fatp::ready_at_light -> most_recent_view" + ); Box::new(most_recent_view.pool.validated_pool().ready()) } else { let empty: ReadyIteratorFor = Box::new(std::iter::empty()); @@ -658,6 +700,143 @@ where ); (false, pending) } + + /// Refer to [`Self::submit_and_watch`] + async fn submit_and_watch_inner( + &self, + at: Block::Hash, + source: TransactionSource, + xt: TransactionFor, + ) -> Result>>, ChainApi::Error> { + let xt = Arc::from(xt); + + let insertion = match self.mempool.push_watched(source, xt.clone()).await { + Ok(result) => result, + Err(TxPoolApiError::ImmediatelyDropped) => + self.attempt_transaction_replacement(source, true, xt.clone()).await?, + Err(e) => return Err(e.into()), + }; + + self.metrics.report(|metrics| metrics.submitted_transactions.inc()); + self.events_metrics_collector.report_submitted(&insertion); + + match self.view_store.submit_and_watch(at, insertion.source, xt).await { + Err(e) => { + 
self.mempool.remove_transactions(&[insertion.hash]).await; + Err(e.into()) + }, + Ok(mut outcome) => { + self.mempool + .update_transaction_priority(outcome.hash(), outcome.priority()) + .await; + Ok(outcome.expect_watcher()) + }, + } + } + + /// Refer to [`Self::submit_at`] + async fn submit_at_inner( + &self, + source: TransactionSource, + xts: Vec>, + ) -> Result, ChainApi::Error>>, ChainApi::Error> { + let view_store = self.view_store.clone(); + trace!( + target: LOG_TARGET, + count = xts.len(), + active_views_count = self.active_views_count(), + "fatp::submit_at" + ); + log_xt_trace!(target: LOG_TARGET, xts.iter().map(|xt| self.tx_hash(xt)), "fatp::submit_at"); + let xts = xts.into_iter().map(Arc::from).collect::>(); + let mempool_results = self.mempool.extend_unwatched(source, &xts).await; + + if view_store.is_empty() { + return Ok(mempool_results + .into_iter() + .map(|r| r.map(|r| r.hash).map_err(Into::into)) + .collect::>()) + } + + // Submit all the transactions to the mempool + let retries = mempool_results + .into_iter() + .zip(xts.clone()) + .map(|(result, xt)| async move { + match result { + Err(TxPoolApiError::ImmediatelyDropped) => + self.attempt_transaction_replacement(source, false, xt).await, + _ => result, + } + }) + .collect::>(); + + let mempool_results = futures::future::join_all(retries).await; + + // Collect transactions that were successfully submitted to the mempool... + let to_be_submitted = mempool_results + .iter() + .zip(xts) + .filter_map(|(result, xt)| { + result.as_ref().ok().map(|insertion| { + self.events_metrics_collector.report_submitted(&insertion); + (insertion.source.clone(), xt) + }) + }) + .collect::>(); + + self.metrics + .report(|metrics| metrics.submitted_transactions.inc_by(to_be_submitted.len() as _)); + + // ... and submit them to the view_store. Please note that transactions rejected by mempool + // are not sent here. 
+ let mempool = self.mempool.clone(); + let results_map = view_store.submit(to_be_submitted.into_iter()).await; + let mut submission_results = reduce_multiview_result(results_map).into_iter(); + + // Note for composing final result: + // + // For each failed insertion into the mempool, the mempool result should be placed into + // the returned vector. + // + // For each successful insertion into the mempool, the corresponding + // view_store submission result needs to be examined (merged_results): + // - If there is an error during view_store submission, the transaction is removed from + // the mempool, and the final result recorded in the vector for this transaction is the + // view_store submission error. + // + // - If the view_store submission is successful, the transaction priority is updated in the + // mempool. + // + // Finally, it collects the hashes of updated transactions or submission errors (either + // from the mempool or view_store) into a returned vector (final_results). + const RESULTS_ASSUMPTION : &str = + "The number of Ok results in mempool is exactly the same as the size of view_store submission result. qed."; + let merged_results = mempool_results.into_iter().map(|result| { + result.map_err(Into::into).and_then(|insertion| { + Ok((insertion.hash, submission_results.next().expect(RESULTS_ASSUMPTION))) + }) + }); + + let mut final_results = vec![]; + for r in merged_results { + match r { + Ok((hash, submission_result)) => match submission_result { + Ok(r) => { + mempool.update_transaction_priority(r.hash(), r.priority()).await; + final_results.push(Ok(r.hash())); + }, + Err(e) => { + mempool.remove_transactions(&[hash]).await; + final_results.push(Err(e)); + }, + }, + Err(e) => final_results.push(Err(e)), + } + } + + Ok(final_results) + } } /// Converts the input view-to-statuses map into the output vector of statuses. 
@@ -731,94 +910,25 @@ where source: TransactionSource, xts: Vec>, ) -> Result, Self::Error>>, Self::Error> { - let view_store = self.view_store.clone(); - debug!( + let start = Instant::now(); + trace!( target: LOG_TARGET, count = xts.len(), active_views_count = self.active_views_count(), "fatp::submit_at" ); log_xt_trace!(target: LOG_TARGET, xts.iter().map(|xt| self.tx_hash(xt)), "fatp::submit_at"); - let xts = xts.into_iter().map(Arc::from).collect::>(); - let mempool_results = self.mempool.extend_unwatched(source, &xts); - if view_store.is_empty() { - return Ok(mempool_results - .into_iter() - .map(|r| r.map(|r| r.hash).map_err(Into::into)) - .collect::>()) - } + let result = self.submit_at_inner(source, xts).await; - // Submit all the transactions to the mempool - let retries = mempool_results - .into_iter() - .zip(xts.clone()) - .map(|(result, xt)| async move { - match result { - Err(TxPoolApiError::ImmediatelyDropped) => - self.attempt_transaction_replacement(source, false, xt).await, - _ => result, - } - }) - .collect::>(); - - let mempool_results = futures::future::join_all(retries).await; - - // Collect transactions that were successfully submitted to the mempool... - let to_be_submitted = mempool_results - .iter() - .zip(xts) - .filter_map(|(result, xt)| { - result.as_ref().ok().map(|insertion| { - self.events_metrics_collector.report_submitted(&insertion); - (insertion.source.clone(), xt) - }) - }) - .collect::>(); - - self.metrics - .report(|metrics| metrics.submitted_transactions.inc_by(to_be_submitted.len() as _)); - - // ... and submit them to the view_store. Please note that transactions rejected by mempool - // are not sent here. 
- let mempool = self.mempool.clone(); - let results_map = view_store.submit(to_be_submitted.into_iter()).await; - let mut submission_results = reduce_multiview_result(results_map).into_iter(); - - // Note for composing final result: - // - // For each failed insertion into the mempool, the mempool result should be placed into - // the returned vector. - // - // For each successful insertion into the mempool, the corresponding - // view_store submission result needs to be examined: - // - If there is an error during view_store submission, the transaction is removed from - // the mempool, and the final result recorded in the vector for this transaction is the - // view_store submission error. - // - // - If the view_store submission is successful, the transaction priority is updated in the - // mempool. - // - // Finally, it collects the hashes of updated transactions or submission errors (either - // from the mempool or view_store) into a returned vector. - const RESULTS_ASSUMPTION : &str = - "The number of Ok results in mempool is exactly the same as the size of view_store submission result. qed."; - Ok(mempool_results - .into_iter() - .map(|result| { - result.map_err(Into::into).and_then(|insertion| { - submission_results.next().expect(RESULTS_ASSUMPTION).inspect_err(|_| { - mempool.remove_transactions(&[insertion.hash]); - }) - }) - }) - .map(|r| { - r.map(|r| { - mempool.update_transaction_priority(&r); - r.hash() - }) - }) - .collect::>()) + insert_and_log_throttled!( + Level::DEBUG, + target:LOG_TARGET_STAT, + prefix:"submit_stats", + self.submit_stats, + start.elapsed().into() + ); + result } /// Submits a single transaction and returns a future resolving to the submission results. @@ -847,40 +957,29 @@ where /// status updates. /// /// Actual transaction submission process is delegated to the `ViewStore` internal instance. 
+ #[instrument(level = Level::TRACE, skip_all, target = "txpool", name = "fatp::submit_and_watch")] async fn submit_and_watch( &self, at: ::Hash, source: TransactionSource, xt: TransactionFor, ) -> Result>>, Self::Error> { + let start = Instant::now(); trace!( target: LOG_TARGET, tx_hash = ?self.tx_hash(&xt), views = self.active_views_count(), "fatp::submit_and_watch" ); - let xt = Arc::from(xt); - - let insertion = match self.mempool.push_watched(source, xt.clone()) { - Ok(result) => result, - Err(TxPoolApiError::ImmediatelyDropped) => - self.attempt_transaction_replacement(source, true, xt.clone()).await?, - Err(e) => return Err(e.into()), - }; - - self.metrics.report(|metrics| metrics.submitted_transactions.inc()); - self.events_metrics_collector.report_submitted(&insertion); - - self.view_store - .submit_and_watch(at, insertion.source, xt) - .await - .inspect_err(|_| { - self.mempool.remove_transactions(&[insertion.hash]); - }) - .map(|mut outcome| { - self.mempool.update_transaction_priority(&outcome); - outcome.expect_watcher() - }) + let result = self.submit_and_watch_inner(at, source, xt).await; + insert_and_log_throttled!( + Level::DEBUG, + target:LOG_TARGET_STAT, + prefix:"submit_and_watch_stats", + self.submit_and_watch_stats, + start.elapsed().into() + ); + result } /// Reports invalid transactions to the transaction pool. 
@@ -904,7 +1003,7 @@ where let removed = self.view_store.report_invalid(at, invalid_tx_errors); let removed_hashes = removed.iter().map(|tx| tx.hash).collect::>(); - self.mempool.remove_transactions(&removed_hashes); + self.mempool.clone().remove_transactions_sync(removed_hashes.clone()); self.import_notification_sink.clean_notified_items(&removed_hashes); self.metrics @@ -924,7 +1023,8 @@ where self.view_store .most_recent_view .read() - .map(|hash| self.view_store.status()[&hash].clone()) + .as_ref() + .map(|v| v.status()) .unwrap_or(PoolStatus { ready: 0, ready_bytes: 0, future: 0, future_bytes: 0 }) } @@ -952,15 +1052,15 @@ where /// block (for which maintain process was accomplished). // todo [#5491]: api change: we probably should have at here? fn ready_transaction(&self, tx_hash: &TxHash) -> Option> { - let most_recent_view = self.view_store.most_recent_view.read(); - let result = most_recent_view - .map(|block_hash| self.view_store.ready_transaction(block_hash, tx_hash)) - .flatten(); + let most_recent_view_hash = + self.view_store.most_recent_view.read().as_ref().map(|v| v.at.hash); + let result = most_recent_view_hash + .and_then(|block_hash| self.view_store.ready_transaction(block_hash, tx_hash)); trace!( target: LOG_TARGET, ?tx_hash, is_ready = result.is_some(), - ?most_recent_view, + most_recent_view = ?most_recent_view_hash, "ready_transaction" ); result @@ -1017,15 +1117,19 @@ where _at: Block::Hash, xt: sc_transaction_pool_api::LocalTransactionFor, ) -> Result { - debug!( + trace!( target: LOG_TARGET, active_views_count = self.active_views_count(), "fatp::submit_local" ); let xt = Arc::from(xt); - let result = - self.mempool.extend_unwatched(TransactionSource::Local, &[xt.clone()]).remove(0); + // note: would be nice to get rid of sync methods one day. 
See: #8912 + let result = self + .mempool + .clone() + .extend_unwatched_sync(TransactionSource::Local, vec![xt.clone()]) + .remove(0); let insertion = match result { Err(TxPoolApiError::ImmediatelyDropped) => self.attempt_transaction_replacement_sync( @@ -1039,10 +1143,12 @@ where self.view_store .submit_local(xt) .inspect_err(|_| { - self.mempool.remove_transactions(&[insertion.hash]); + self.mempool.clone().remove_transactions_sync(vec![insertion.hash]); }) .map(|outcome| { - self.mempool.update_transaction_priority(&outcome); + self.mempool + .clone() + .update_transaction_priority_sync(outcome.hash(), outcome.priority()); outcome.hash() }) .or_else(|_| Ok(insertion.hash)) @@ -1062,6 +1168,7 @@ where /// block. /// /// If the view is correctly created, `ready_at` pollers for this block will be triggered. + #[instrument(level = Level::TRACE, skip_all, target = "txpool", name = "fatp::handle_new_block")] async fn handle_new_block(&self, tree_route: &TreeRoute) { let hash_and_number = match tree_route.last() { Some(hash_and_number) => hash_and_number, @@ -1076,7 +1183,7 @@ where }; if self.has_view(&hash_and_number.hash) { - trace!( + debug!( target: LOG_TARGET, ?hash_and_number, "view already exists for block" @@ -1098,7 +1205,7 @@ where View::start_background_revalidation(view, self.revalidation_queue.clone()).await; } - self.finality_stall_cleanup(hash_and_number); + self.finality_stall_cleanup(hash_and_number).await; } /// Cleans up transactions and views outdated by potential finality stalls. @@ -1110,7 +1217,7 @@ where /// /// Additionally, this method triggers the view store to handle and remove stale views caused by /// the finality stall. 
- fn finality_stall_cleanup(&self, at: &HashAndNumber) { + async fn finality_stall_cleanup(&self, at: &HashAndNumber) { let (oldest_block_number, finality_timedout_blocks) = { let mut included_transactions = self.included_transactions.lock(); @@ -1154,7 +1261,7 @@ where for (block_hash, tx_hashes) in finality_timedout_blocks { self.view_store.listener.transactions_finality_timeout(&tx_hashes, block_hash); - self.mempool.remove_transactions(&tx_hashes); + self.mempool.remove_transactions(&tx_hashes).await; self.import_notification_sink.clean_notified_items(&tx_hashes); self.view_store.dropped_stream_controller.remove_transactions(tx_hashes.clone()); } @@ -1186,6 +1293,7 @@ where at: &HashAndNumber, tree_route: &TreeRoute, ) -> Option>> { + let enter = Instant::now(); debug!( target: LOG_TARGET, ?at, @@ -1193,6 +1301,7 @@ where ?tree_route, "build_new_view" ); + let (mut view, view_dropped_stream, view_aggregated_stream) = if let Some(origin_view) = origin_view { let (mut view, view_dropped_stream, view_aggragated_stream) = @@ -1215,6 +1324,12 @@ where self.is_validator.clone(), ) }; + debug!( + target: LOG_TARGET, + ?at, + duration = ?enter.elapsed(), + "build_new_view::clone_view" + ); let start = Instant::now(); // 1. Capture all import notification from the very beginning, so first register all @@ -1263,6 +1378,13 @@ where ); let view = Arc::from(view); self.view_store.insert_new_view(view.clone(), tree_route).await; + + debug!( + target: LOG_TARGET, + duration = ?enter.elapsed(), + ?at, + "build_new_view" + ); Some(view) } @@ -1275,7 +1397,7 @@ where return txs.clone() }; - trace!( + debug!( target: LOG_TARGET, ?at, "fetch_block_transactions from api" @@ -1346,10 +1468,11 @@ where /// If there are no views, and mempool transaction is reported as invalid for the given view, /// the transaction is notified as invalid and removed from the mempool. 
async fn update_view_with_mempool(&self, view: &View) { + let xts_count = self.mempool.unwatched_and_watched_count().await; debug!( target: LOG_TARGET, view_at = ?view.at, - xts_count = ?self.mempool.unwatched_and_watched_count(), + ?xts_count, active_views_count = self.active_views_count(), "update_view_with_mempool" ); @@ -1357,25 +1480,37 @@ where let (hashes, xts_filtered): (Vec<_>, Vec<_>) = self .mempool - .clone_transactions() + .with_transactions(|iter| { + iter.filter(|(hash, _)| !view.is_imported(&hash) && !included_xts.contains(&hash)) + .map(|(k, v)| (*k, v.clone())) + //todo [#8835]: better approach is needed - maybe time-budget approach? + .take(MEMPOOL_TO_VIEW_BATCH_SIZE) + .collect::>() + }) + .await .into_iter() - .filter(|(hash, _)| !view.is_imported(hash)) - .filter(|(hash, _)| !included_xts.contains(&hash)) .map(|(tx_hash, tx)| (tx_hash, (tx.source(), tx.tx()))) .unzip(); let results = view - .submit_many(xts_filtered) + .submit_many(xts_filtered, ValidateTransactionPriority::Maintained) .await .into_iter() .zip(hashes) - .map(|(result, tx_hash)| { - result - .map(|outcome| self.mempool.update_transaction_priority(&outcome.into())) - .or_else(|_| Err(tx_hash)) + .map(|(result, tx_hash)| async move { + if let Ok(outcome) = result { + Ok(self + .mempool + .update_transaction_priority(outcome.hash(), outcome.priority()) + .await) + } else { + Err(tx_hash) + } }) .collect::>(); + let results = futures::future::join_all(results).await; + let submitted_count = results.len(); debug!( @@ -1395,7 +1530,7 @@ where for result in results { if let Err(tx_hash) = result { self.view_store.listener.transactions_invalidated(&[tx_hash]); - self.mempool.remove_transactions(&[tx_hash]); + self.mempool.remove_transactions(&[tx_hash]).await; } } } @@ -1511,11 +1646,9 @@ where self.included_transactions.lock().insert(key.clone(), enacted_log); }); - self.metrics.report(|metrics| { - metrics - .unknown_from_block_import_txs - 
.inc_by(self.mempool.count_unknown_transactions(pruned_log.iter()) as _) - }); + let unknown_count = self.mempool.count_unknown_transactions(pruned_log.iter()).await; + self.metrics + .report(|metrics| metrics.unknown_from_block_import_txs.inc_by(unknown_count as _)); //resubmit { @@ -1540,45 +1673,54 @@ where let mut resubmitted_to_report = 0; - resubmit_transactions.extend( - block_transactions - .into_iter() - .map(|tx| (self.hash_of(&tx), tx)) - .filter(|(tx_hash, _)| { - let contains = pruned_log.contains(&tx_hash); - - // need to count all transactions, not just filtered, here - resubmitted_to_report += 1; - - if !contains { - trace!( - target: LOG_TARGET, - ?tx_hash, - ?hash, - "Resubmitting from retracted block" - ); - } - !contains - }) - .map(|(tx_hash, tx)| { - //find arc if tx is known - self.mempool - .get_by_hash(tx_hash) - .map(|tx| (tx.source(), tx.tx())) - .unwrap_or_else(|| { - // These transactions are coming from retracted blocks, we - // should simply consider them external. - (TimedTransactionSource::new_external(true), Arc::from(tx)) - }) - }), + let txs = block_transactions.into_iter().map(|tx| (self.hash_of(&tx), tx)).filter( + |(tx_hash, _)| { + let contains = pruned_log.contains(&tx_hash); + + // need to count all transactions, not just filtered, here + resubmitted_to_report += 1; + + if !contains { + trace!( + target: LOG_TARGET, + ?tx_hash, + ?hash, + "Resubmitting from retracted block" + ); + } + !contains + }, ); + let mut result = vec![]; + for (tx_hash, tx) in txs { + result.push( + //find arc if tx is known + self.mempool + .get_by_hash(tx_hash) + .await + .map(|tx| (tx.source(), tx.tx())) + .unwrap_or_else(|| { + // These transactions are coming from retracted blocks, we + // should simply consider them external. 
+ (TimedTransactionSource::new_external(true), Arc::from(tx)) + }), + ); + } + resubmit_transactions.extend(result); self.metrics.report(|metrics| { metrics.resubmitted_retracted_txs.inc_by(resubmitted_to_report) }); } - let _ = view.pool.resubmit_at(&hash_and_number, resubmit_transactions).await; + let _ = view + .pool + .resubmit_at( + &hash_and_number, + resubmit_transactions, + ValidateTransactionPriority::Maintained, + ) + .await; } } @@ -1588,6 +1730,7 @@ where /// - executing the on finalized procedure for the view store, /// - purging finalized transactions from the mempool and triggering mempool revalidation, async fn handle_finalized(&self, finalized_hash: Block::Hash, tree_route: &[Block::Hash]) { + let start = Instant::now(); let finalized_number = self.api.block_id_to_number(&BlockId::Hash(finalized_hash)); debug!( target: LOG_TARGET, @@ -1616,7 +1759,7 @@ where ) .await; } else { - trace!( + debug!( target: LOG_TARGET, ?finalized_number, "handle_finalized: revalidation/cleanup skipped: could not resolve finalized block number" @@ -1629,6 +1772,7 @@ where target: LOG_TARGET, active_views_count = self.active_views_count(), included_transactions_len = ?self.included_transactions.lock().len(), + duration = ?start.elapsed(), "handle_finalized after" ); } @@ -1647,22 +1791,20 @@ where /// /// If no lower-priority transaction is found, the function returns an error indicating the /// transaction was dropped immediately. 
+ #[instrument(level = Level::TRACE, skip_all, target = "txpool", name = "fatp::attempt_transaction_replacement")] async fn attempt_transaction_replacement( &self, source: TransactionSource, watched: bool, xt: ExtrinsicFor, ) -> Result>, TxPoolApiError> { - let at = self + let best_view = self .view_store .most_recent_view .read() - .ok_or(TxPoolApiError::ImmediatelyDropped)?; - - let (best_view, _) = self - .view_store - .get_view_at(at, false) - .ok_or(TxPoolApiError::ImmediatelyDropped)?; + .as_ref() + .ok_or(TxPoolApiError::ImmediatelyDropped)? + .clone(); let (xt_hash, validated_tx) = best_view .pool @@ -1672,6 +1814,7 @@ where TimedTransactionSource::from_transaction_source(source, false), xt.clone(), crate::graph::CheckBannedBeforeVerify::Yes, + ValidateTransactionPriority::Submitted, ) .await; @@ -1679,7 +1822,9 @@ where return Err(TxPoolApiError::ImmediatelyDropped) }; - self.attempt_transaction_replacement_inner(xt, xt_hash, priority, source, watched) + let insertion_info = + self.mempool.try_insert_with_replacement(xt, priority, source, watched).await?; + self.post_attempt_transaction_replacement(xt_hash, insertion_info) } /// Sync version of [`Self::attempt_transaction_replacement`]. @@ -1693,7 +1838,10 @@ where .view_store .most_recent_view .read() - .ok_or(TxPoolApiError::ImmediatelyDropped)?; + .as_ref() + .ok_or(TxPoolApiError::ImmediatelyDropped)? + .at + .hash; let ValidTransaction { priority, .. 
} = self .api @@ -1704,20 +1852,19 @@ where TransactionValidityError::Unknown(u) => TxPoolApiError::UnknownTransaction(u), })?; let xt_hash = self.hash_of(&xt); - self.attempt_transaction_replacement_inner(xt, xt_hash, priority, source, watched) + + let insertion_info = self + .mempool + .clone() + .try_insert_with_replacement_sync(xt, priority, source, watched)?; + self.post_attempt_transaction_replacement(xt_hash, insertion_info) } - fn attempt_transaction_replacement_inner( + fn post_attempt_transaction_replacement( &self, - xt: ExtrinsicFor, tx_hash: ExtrinsicHash, - priority: TransactionPriority, - source: TransactionSource, - watched: bool, + insertion_info: InsertionInfo>, ) -> Result>, TxPoolApiError> { - let insertion_info = - self.mempool.try_insert_with_replacement(xt, priority, source, watched)?; - for worst_hash in &insertion_info.removed { trace!( target: LOG_TARGET, @@ -1778,7 +1925,7 @@ where match result { Err(error) => { - trace!( + debug!( target: LOG_TARGET, %error, "enactment_state::update error" @@ -1805,7 +1952,7 @@ where ChainEvent::Finalized { hash, ref tree_route } => { self.handle_finalized(hash, tree_route).await; - trace!( + debug!( target: LOG_TARGET, ?tree_route, ?prev_finalized_block, @@ -1815,10 +1962,10 @@ where } let duration = start.elapsed(); - + let mempool_len = self.mempool_len().await; info!( target: LOG_TARGET, - txs = ?self.mempool_len(), + txs = ?mempool_len, a = self.active_views_count(), i = self.inactive_views_count(), views = ?self.views_stats(), @@ -1828,7 +1975,7 @@ where ); self.metrics.report(|metrics| { - let (unwatched, watched) = self.mempool_len(); + let (unwatched, watched) = mempool_len; let _ = ( self.active_views_count().try_into().map(|v| metrics.active_views.set(v)), self.inactive_views_count().try_into().map(|v| metrics.inactive_views.set(v)), diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs 
b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs index 24f71982c7816..be1c850aa09a2 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/revalidation_worker.rs @@ -30,7 +30,7 @@ use sp_runtime::traits::Block as BlockT; use super::{tx_mem_pool::TxMemPool, view_store::ViewStore}; use futures::prelude::*; -use tracing::{trace, warn}; +use tracing::{debug, warn}; use super::view::{FinishRevalidationWorkerChannels, View}; @@ -135,7 +135,7 @@ where view: Arc>, finish_revalidation_worker_channels: FinishRevalidationWorkerChannels, ) { - trace!( + debug!( target: LOG_TARGET, view_at_hash = ?view.at.hash, "revalidation_queue::revalidate_view: Sending view to revalidation queue" @@ -170,7 +170,7 @@ where view_store: Arc>, finalized_hash: HashAndNumber, ) { - trace!( + debug!( target: LOG_TARGET, ?finalized_hash, "Sent mempool to revalidation queue" @@ -201,7 +201,7 @@ mod tests { use crate::{ common::tests::{uxt, TestApi}, fork_aware_txpool::view::FinishRevalidationLocalChannels, - TimedTransactionSource, + TimedTransactionSource, ValidateTransactionPriority, }; use futures::executor::block_on; use substrate_test_runtime::{AccountId, Transfer, H256}; @@ -223,10 +223,10 @@ mod tests { nonce: 0, }); - let _ = block_on(view.submit_many(std::iter::once(( - TimedTransactionSource::new_external(false), - uxt.clone().into(), - )))); + let _ = block_on(view.submit_many( + std::iter::once((TimedTransactionSource::new_external(false), uxt.clone().into())), + ValidateTransactionPriority::Submitted, + )); assert_eq!(api.validation_requests().len(), 1); let (finish_revalidation_request_tx, finish_revalidation_request_rx) = diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index 559f11da4cdb2..e67566baa8b33 100644 --- 
a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -25,54 +25,60 @@ //! - the transaction can be invalid on some forks (and thus the associated views may not contain //! it), while on other forks tx can be valid. Depending on which view is chosen to be cloned, //! such transaction could not be present in the newly created view. +//! +//! Sync methods (with `_sync` suffix) are also exposed, and it should be safe to call them from +//! sync or non-tokio contenxt. These methods are required for implementing some non-async methods. +//! See for some more information. The implementation of the +//! bridging is based on passing messages from sync context to tokio thread. -use std::{ - cmp::Ordering, - collections::{HashMap, HashSet}, - sync::{ - atomic::{self, AtomicU64}, - Arc, - }, - time::Instant, -}; - -use futures::FutureExt; +use futures::{future::join_all, FutureExt}; use itertools::Itertools; use parking_lot::RwLock; -use tracing::{debug, trace}; - use sc_transaction_pool_api::{TransactionPriority, TransactionSource}; use sp_blockchain::HashAndNumber; use sp_runtime::{ traits::Block as BlockT, transaction_validity::{InvalidTransaction, TransactionValidityError}, }; +use std::{ + collections::HashSet, + future::Future, + pin::Pin, + sync::{ + atomic::{self, AtomicU64}, + mpsc::{ + channel as sync_bridge_channel, Receiver as SyncBridgeReceiver, + Sender as SyncBridgeSender, + }, + Arc, + }, + time::Instant, +}; +use tracing::{debug, trace}; use crate::{ common::tracing_log_xt::log_xt_trace, graph, - graph::{base_pool::TimedTransactionSource, tracked_map::Size, ExtrinsicFor, ExtrinsicHash}, - LOG_TARGET, + graph::{base_pool::TimedTransactionSource, ExtrinsicFor, ExtrinsicHash}, + ValidateTransactionPriority, LOG_TARGET, }; use super::{ - metrics::MetricsLink as PrometheusMetrics, - multi_view_listener::MultiViewListener, - view_store::{ViewStore, 
ViewStoreSubmitOutcome}, + metrics::MetricsLink as PrometheusMetrics, multi_view_listener::MultiViewListener, + view_store::ViewStore, }; +mod tx_mem_pool_map; + /// The minimum interval between single transaction revalidations. Given in blocks. pub(crate) const TXMEMPOOL_REVALIDATION_PERIOD: u64 = 10; /// The number of transactions revalidated in single revalidation batch. pub(crate) const TXMEMPOOL_MAX_REVALIDATION_BATCH_SIZE: usize = 1000; -/// The maximum number of transactions kept in the mem pool. Given as multiple of -/// the view's total limit. -pub const TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER: usize = 4; +const SYNC_BRIDGE_EXPECT: &str = "The mempool blocking task shall not be terminated. qed."; /// Represents the transaction in the intermediary buffer. -#[derive(Debug)] pub(crate) struct TxInMemPool where Block: BlockT, @@ -175,7 +181,65 @@ where } } -impl Size for Arc> +impl std::fmt::Debug for TxInMemPool +where + Block: BlockT, + ChainApi: graph::ChainApi + 'static, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TxInMemPool") + .field("watched", &self.watched) + .field("tx", &"...") + .field("bytes", &self.bytes) + .field("source", &self.source) + .field("validated_at", &self.validated_at) + .field("priority", &self.priority) + .finish() + } +} + +impl std::cmp::PartialEq for TxInMemPool +where + Block: BlockT, + ChainApi: graph::ChainApi + 'static, +{ + fn eq(&self, other: &Self) -> bool { + self.watched == other.watched && + self.tx == other.tx && + self.bytes == other.bytes && + self.source == other.source && + *self.priority.read() == *other.priority.read() && + self.validated_at.load(atomic::Ordering::Relaxed) == + other.validated_at.load(atomic::Ordering::Relaxed) + } +} + +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +struct MempoolTxPriority(pub Option); + +impl Ord for MempoolTxPriority { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + match (&self.0, &other.0) { + (Some(a), Some(b)) => 
a.cmp(b), + (Some(_), None) => std::cmp::Ordering::Less, + (None, Some(_)) => std::cmp::Ordering::Greater, + (None, None) => std::cmp::Ordering::Equal, + } + } +} +impl From> for MempoolTxPriority { + fn from(value: Option) -> Self { + MempoolTxPriority(value) + } +} + +impl PartialOrd for MempoolTxPriority { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl tx_mem_pool_map::Size for Arc> where Block: BlockT, ChainApi: graph::ChainApi + 'static, @@ -185,8 +249,33 @@ where } } -type InternalTxMemPoolMap = - graph::tracked_map::TrackedMap, Arc>>; +impl tx_mem_pool_map::PriorityAndTimestamp for Arc> +where + Block: BlockT, + ChainApi: graph::ChainApi + 'static, +{ + type Priority = MempoolTxPriority; + type Timestamp = Option; + + fn priority(&self) -> Self::Priority { + TxInMemPool::priority(self).into() + } + + fn timestamp(&self) -> Self::Timestamp { + self.source().timestamp + } +} + +type InternalTxMemPoolMap = tx_mem_pool_map::SizeTrackedStore< + ExtrinsicHash, + tx_mem_pool_map::PriorityKey>, + Arc>, +>; + +/// Internal (blocking) task for bridging sync and async code. +/// +/// Should be polled in blocking task. +pub type TxMemPoolBlockingTask = Pin + Send>>; /// An intermediary transactions buffer. /// @@ -210,6 +299,9 @@ where /// Provides a side-channel allowing to send per-transaction state changes notification. listener: Arc>, + /// Channel used to send the requests from the sync code. + sync_channel: SyncBridgeSender>, + /// A map that stores the transactions currently in the memory pool. 
/// /// The key is the hash of the transaction, and the value is a wrapper @@ -257,15 +349,21 @@ where metrics: PrometheusMetrics, max_transactions_count: usize, max_transactions_total_bytes: usize, - ) -> Self { - Self { - api, - listener, - transactions: Default::default(), - metrics, - max_transactions_count, - max_transactions_total_bytes, - } + ) -> (Self, TxMemPoolBlockingTask) { + let (sync_channel, rx) = sync_bridge_channel(); + let task = Self::sync_bridge_task(rx); + ( + Self { + api, + listener, + sync_channel, + transactions: Default::default(), + metrics, + max_transactions_count, + max_transactions_total_bytes, + }, + task.boxed(), + ) } /// Creates a new `TxMemPool` instance for testing purposes. @@ -275,34 +373,37 @@ where max_transactions_count: usize, max_transactions_total_bytes: usize, ) -> Self { + let (sync_channel, rx) = sync_bridge_channel(); + tokio::task::spawn_blocking(move || Self::sync_bridge_task(rx)); Self { api, listener: Arc::from(MultiViewListener::new_with_worker(Default::default()).0), transactions: Default::default(), metrics: Default::default(), + sync_channel, max_transactions_count, max_transactions_total_bytes, } } /// Retrieves a transaction by its hash if it exists in the memory pool. - pub(super) fn get_by_hash( + pub(super) async fn get_by_hash( &self, hash: ExtrinsicHash, ) -> Option>> { - self.transactions.read().get(&hash).map(Clone::clone) + self.transactions.read().await.get(&hash).map(Clone::clone) } /// Returns a tuple with the count of unwatched and watched transactions in the memory pool. - pub fn unwatched_and_watched_count(&self) -> (usize, usize) { - let transactions = self.transactions.read(); + pub async fn unwatched_and_watched_count(&self) -> (usize, usize) { + let transactions = self.transactions.read().await; let watched_count = transactions.values().filter(|t| t.is_watched()).count(); (transactions.len() - watched_count, watched_count) } /// Returns a total number of transactions kept within mempool. 
pub fn len(&self) -> usize { - self.transactions.read().len() + self.transactions.len() } /// Returns the number of bytes used by all extrinsics in the the pool. @@ -319,12 +420,12 @@ where /// Attempts to insert a transaction into the memory pool, ensuring it does not /// exceed the maximum allowed transaction count. - fn try_insert( + async fn try_insert( &self, tx_hash: ExtrinsicHash, tx: TxInMemPool, ) -> Result>, sc_transaction_pool_api::error::Error> { - let mut transactions = self.transactions.write(); + let mut transactions = self.transactions.write().await; let bytes = self.transactions.bytes(); @@ -362,7 +463,7 @@ where /// /// Returns a `Result` containing `InsertionInfo` if the new transaction is successfully /// inserted; otherwise, returns an appropriate error indicating the failure. - pub(super) fn try_insert_with_replacement( + pub(super) async fn try_insert_with_replacement( &self, new_tx: ExtrinsicFor, priority: TransactionPriority, @@ -375,103 +476,88 @@ where return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); } - let mut transactions = self.transactions.write(); + let mut transactions = self.transactions.write().await; if transactions.contains_key(&hash) { return Err(sc_transaction_pool_api::error::Error::AlreadyImported(Box::new(hash))); } - let mut sorted = transactions - .iter() - .filter_map(|(h, v)| v.priority().map(|_| (*h, v.clone()))) - .collect::>(); - // When pushing higher prio transaction, we need to find a number of lower prio txs, such // that the sum of their bytes is ge then size of new tx. Otherwise we could overflow size // limits. Naive way to do it - rev-sort by priority and eat the tail. 
// reverse (oldest, lowest prio last) - sorted.sort_by(|(_, a), (_, b)| match b.priority().cmp(&a.priority()) { - Ordering::Equal => match (a.source.timestamp, b.source.timestamp) { - (Some(a), Some(b)) => b.cmp(&a), - _ => Ordering::Equal, - }, - ordering => ordering, - }); - - let mut total_size_removed = 0usize; - let mut to_be_removed = vec![]; - let free_bytes = self.max_transactions_total_bytes - self.transactions.bytes(); - - loop { - let Some((worst_hash, worst_tx)) = sorted.pop() else { - return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); - }; - - if worst_tx.priority() >= new_tx.priority() { - return Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped); - } - - total_size_removed += worst_tx.bytes; - to_be_removed.push(worst_hash); - - if free_bytes + total_size_removed >= new_tx.bytes { - break; - } - } - let source = new_tx.source(); - transactions.insert(hash, Arc::from(new_tx)); - for worst_hash in &to_be_removed { - transactions.remove(worst_hash); - } + let new_tx = Arc::new(new_tx); + let insertion_result = transactions.try_insert_with_replacement( + self.max_transactions_total_bytes, + hash, + new_tx, + ); debug_assert!(!self.is_limit_exceeded(transactions.len(), self.transactions.bytes())); - - Ok(InsertionInfo::new_with_removed(hash, source, to_be_removed)) + match insertion_result { + None => Err(sc_transaction_pool_api::error::Error::ImmediatelyDropped), + Some(to_be_removed) => Ok(InsertionInfo::new_with_removed(hash, source, to_be_removed)), + } } /// Adds a new unwatched transactions to the internal buffer not exceeding the limit. /// /// Returns the vector of results for each transaction, the order corresponds to the input /// vector. 
- pub(super) fn extend_unwatched( + pub(super) async fn extend_unwatched( &self, source: TransactionSource, xts: &[ExtrinsicFor], ) -> Vec>, sc_transaction_pool_api::error::Error>> { - let result = xts - .iter() - .map(|xt| { - let (hash, length) = self.api.hash_and_length(&xt); - self.try_insert(hash, TxInMemPool::new_unwatched(source, xt.clone(), length)) - }) - .collect::>(); - result + let insert_futures = xts.into_iter().map(|xt| { + let api = self.api.clone(); + let xt = xt.clone(); + async move { + let (hash, length) = api.hash_and_length(&xt); + self.try_insert(hash, TxInMemPool::new_unwatched(source, xt, length)).await + } + }); + + join_all(insert_futures).await } /// Adds a new watched transaction to the memory pool if it does not exceed the maximum allowed /// transaction count. - pub(super) fn push_watched( + pub(super) async fn push_watched( &self, source: TransactionSource, xt: ExtrinsicFor, ) -> Result>, sc_transaction_pool_api::error::Error> { let (hash, length) = self.api.hash_and_length(&xt); self.try_insert(hash, TxInMemPool::new_watched(source, xt.clone(), length)) + .await } - /// Clones and returns a `HashMap` of references to all transactions in the memory pool. - pub(super) fn clone_transactions( - &self, - ) -> HashMap, Arc>> { - self.transactions.clone_map() + /// Provides read-only access to all transctions via an iterator. + /// + /// This function allows to iterate over all stored transaction without cloning. + /// The provided closure receives an iterator over references to keys and values. + /// + /// Note: Typically some filtering should be applied and required items can be cloned and return + /// outside the closure if required. Transacaction are `Arc` so clone shall be cheap. 
+ pub(super) async fn with_transactions(&self, f: F) -> R + where + F: Fn( + std::collections::hash_map::Iter< + ExtrinsicHash, + Arc>, + >, + ) -> R, + { + self.transactions.read().await.with_items(f) } /// Removes transactions with given hashes from the memory pool. - pub(super) fn remove_transactions(&self, tx_hashes: &[ExtrinsicHash]) { + pub(super) async fn remove_transactions(&self, tx_hashes: &[ExtrinsicHash]) { log_xt_trace!(target: LOG_TARGET, tx_hashes, "mempool::remove_transaction"); - let mut transactions = self.transactions.write(); + let mut transactions = self.transactions.write().await; for tx_hash in tx_hashes { transactions.remove(tx_hash); } @@ -489,26 +575,32 @@ where let start = Instant::now(); let (count, input) = { - let transactions = self.transactions.clone_map(); - ( - transactions.len(), - transactions - .into_iter() - .filter(|xt| { + self.transactions.len(), + self.with_transactions(|iter| { + iter.filter(|(_, xt)| { let finalized_block_number = finalized_block.number.into().as_u64(); - xt.1.validated_at.load(atomic::Ordering::Relaxed) + + xt.validated_at.load(atomic::Ordering::Relaxed) + TXMEMPOOL_REVALIDATION_PERIOD < finalized_block_number }) - .sorted_by_key(|tx| tx.1.validated_at.load(atomic::Ordering::Relaxed)) - .take(TXMEMPOOL_MAX_REVALIDATION_BATCH_SIZE), + .sorted_by_key(|(_, tx)| tx.validated_at.load(atomic::Ordering::Relaxed)) + .take(TXMEMPOOL_MAX_REVALIDATION_BATCH_SIZE) + .map(|(k, v)| (*k, v.clone())) + .collect::>() + }) + .await, ) }; let validations_futures = input.into_iter().map(|(xt_hash, xt)| { self.api - .validate_transaction(finalized_block.hash, xt.source.clone().into(), xt.tx()) + .validate_transaction( + finalized_block.hash, + xt.source.clone().into(), + xt.tx(), + ValidateTransactionPriority::Maintained, + ) .map(move |validation_result| { xt.validated_at .store(finalized_block.number.into().as_u64(), atomic::Ordering::Relaxed); @@ -563,7 +655,7 @@ where "purge_finalized_transactions" ); 
log_xt_trace!(target: LOG_TARGET, finalized_xts, "purged finalized transactions"); - let mut transactions = self.transactions.write(); + let mut transactions = self.transactions.write().await; finalized_xts.iter().for_each(|t| { transactions.remove(t); }); @@ -590,7 +682,7 @@ where } { - let mut transactions = self.transactions.write(); + let mut transactions = self.transactions.write().await; invalid_hashes_subtrees.iter().for_each(|tx_hash| { transactions.remove(&tx_hash); }); @@ -619,32 +711,225 @@ where /// Updates the priority of transaction stored in mempool using provided view_store submission /// outcome. - pub(super) fn update_transaction_priority(&self, outcome: &ViewStoreSubmitOutcome) { - outcome.priority().map(|priority| { - self.transactions - .write() - .get_mut(&outcome.hash()) - .map(|p| *p.priority.write() = Some(priority)) - }); + pub(super) async fn update_transaction_priority( + &self, + hash: ExtrinsicHash, + prio: Option, + ) { + if let Some(priority) = prio { + let mut transactions = self.transactions.write().await; + + transactions.update_item(&hash, |t| { + *t.priority.write() = Some(priority); + }); + } } /// Counts the number of transactions in the provided iterator of hashes /// that are not known to the pool. - pub(super) fn count_unknown_transactions<'a>( + pub(super) async fn count_unknown_transactions<'a>( &self, hashes: impl Iterator>, ) -> usize { - let transactions = self.transactions.read(); + let transactions = self.transactions.read().await; hashes.filter(|tx_hash| !transactions.contains_key(tx_hash)).count() } } +/// Convenient return type of extend_unwatched +type ExtendUnwatchedResult = + Vec>, sc_transaction_pool_api::error::Error>>; + +/// Convenient return type of try_insert_with_replacement +type TryInsertWithReplacementResult = + Result>, sc_transaction_pool_api::error::Error>; + +/// Helper enum defining what requests can be made from sync code. 
+enum TxMemPoolSyncRequest +where + Block: BlockT, + ChainApi: graph::ChainApi + 'static, +{ + RemoveTransactions( + Arc>, + Vec>, + SyncBridgeSender<()>, + ), + ExtendUnwatched( + Arc>, + TransactionSource, + Vec>, + SyncBridgeSender>, + ), + UpdateTransactionPriority( + Arc>, + ExtrinsicHash, + Option, + SyncBridgeSender<()>, + ), + TryInsertWithReplacement( + Arc>, + ExtrinsicFor, + TransactionPriority, + TransactionSource, + bool, + SyncBridgeSender>, + ), +} + +impl TxMemPoolSyncRequest +where + Block: BlockT, + ChainApi: graph::ChainApi + 'static, +{ + fn remove_transactions( + mempool: Arc>, + hashes: Vec>, + ) -> (SyncBridgeReceiver<()>, Self) { + let (tx, rx) = sync_bridge_channel(); + (rx, Self::RemoveTransactions(mempool, hashes, tx)) + } + + fn extend_unwatched( + mempool: Arc>, + source: TransactionSource, + xts: Vec>, + ) -> (SyncBridgeReceiver>, Self) { + let (tx, rx) = sync_bridge_channel(); + (rx, Self::ExtendUnwatched(mempool, source, xts, tx)) + } + + fn update_transaction_priority( + mempool: Arc>, + hash: ExtrinsicHash, + prio: Option, + ) -> (SyncBridgeReceiver<()>, Self) { + let (tx, rx) = sync_bridge_channel(); + (rx, Self::UpdateTransactionPriority(mempool, hash, prio, tx)) + } + + fn try_insert_with_replacement( + mempool: Arc>, + new_tx: ExtrinsicFor, + priority: TransactionPriority, + source: TransactionSource, + watched: bool, + ) -> (SyncBridgeReceiver>, Self) { + let (tx, rx) = sync_bridge_channel(); + (rx, Self::TryInsertWithReplacement(mempool, new_tx, priority, source, watched, tx)) + } +} + +impl TxMemPool +where + Block: BlockT, + ChainApi: graph::ChainApi + 'static, + ::Hash: Unpin, +{ + async fn sync_bridge_task(rx: SyncBridgeReceiver>) { + for request in rx { + Self::handle_request(request).await; + } + } + + async fn handle_request(request: TxMemPoolSyncRequest) { + match request { + TxMemPoolSyncRequest::RemoveTransactions(mempool, hashes, tx) => { + mempool.remove_transactions(&hashes).await; + if let Err(error) = 
tx.send(()) { + debug!(target: LOG_TARGET, ?error, "RemoveTransaction: sending response failed"); + } + }, + TxMemPoolSyncRequest::ExtendUnwatched(mempool, source, txs, tx) => { + let result = mempool.extend_unwatched(source, &txs).await; + if let Err(error) = tx.send(result) { + debug!(target: LOG_TARGET, ?error, "ExtendUnwatched: sending response failed"); + } + }, + TxMemPoolSyncRequest::UpdateTransactionPriority(mempool, hash, prio, tx) => { + let result = mempool.update_transaction_priority(hash, prio).await; + if let Err(error) = tx.send(result) { + debug!(target: LOG_TARGET, ?error, "UpdateTransactionPriority2: sending response failed"); + } + }, + TxMemPoolSyncRequest::TryInsertWithReplacement( + mempool, + new_tx, + priority, + source, + watched, + tx, + ) => { + let result = + mempool.try_insert_with_replacement(new_tx, priority, source, watched).await; + if let Err(error) = tx.send(result) { + debug!(target: LOG_TARGET, ?error, "TryInsertWithReplacementSync: sending response failed"); + } + }, + } + } + + pub(super) fn try_insert_with_replacement_sync( + self: Arc, + new_tx: ExtrinsicFor, + priority: TransactionPriority, + source: TransactionSource, + watched: bool, + ) -> Result>, sc_transaction_pool_api::error::Error> { + let (response, request) = TxMemPoolSyncRequest::try_insert_with_replacement( + self.clone(), + new_tx, + priority, + source, + watched, + ); + let _ = self.sync_channel.send(request); + response.recv().expect(SYNC_BRIDGE_EXPECT) + } + + pub(super) fn extend_unwatched_sync( + self: Arc, + source: TransactionSource, + xts: Vec>, + ) -> Vec>, sc_transaction_pool_api::error::Error>> + { + let (response, request) = TxMemPoolSyncRequest::extend_unwatched(self.clone(), source, xts); + let _ = self.sync_channel.send(request); + response.recv().expect(SYNC_BRIDGE_EXPECT) + } + + pub(super) fn remove_transactions_sync( + self: Arc, + tx_hashes: Vec>, + ) { + let (response, request) = + TxMemPoolSyncRequest::remove_transactions(self.clone(), 
tx_hashes); + let _ = self.sync_channel.send(request); + response.recv().expect(SYNC_BRIDGE_EXPECT) + } + + pub(super) fn update_transaction_priority_sync( + self: Arc, + hash: ExtrinsicHash, + prio: Option, + ) { + let (response, request) = + TxMemPoolSyncRequest::update_transaction_priority(self.clone(), hash, prio); + let _ = self.sync_channel.send(request); + response.recv().expect(SYNC_BRIDGE_EXPECT) + } +} + #[cfg(test)] mod tx_mem_pool_tests { + use futures::future::join_all; use substrate_test_runtime::{AccountId, Extrinsic, ExtrinsicBuilder, Transfer, H256}; use substrate_test_runtime_client::Sr25519Keyring::*; - use crate::{common::tests::TestApi, graph::ChainApi}; + use crate::{ + common::tests::TestApi, fork_aware_txpool::view_store::ViewStoreSubmitOutcome, + graph::ChainApi, + }; use super::*; @@ -657,15 +942,15 @@ mod tx_mem_pool_tests { }) } - #[test] - fn extend_unwatched_obeys_limit() { + #[tokio::test] + async fn extend_unwatched_obeys_limit() { let max = 10; let api = Arc::from(TestApi::default()); let mempool = TxMemPool::new_test(api, max, usize::MAX); let xts = (0..max + 1).map(|x| Arc::from(uxt(x as _))).collect::>(); - let results = mempool.extend_unwatched(TransactionSource::External, &xts); + let results = mempool.extend_unwatched(TransactionSource::External, &xts).await; assert!(results.iter().take(max).all(Result::is_ok)); assert!(matches!( results.into_iter().last().unwrap().unwrap_err(), @@ -673,8 +958,8 @@ mod tx_mem_pool_tests { )); } - #[test] - fn extend_unwatched_detects_already_imported() { + #[tokio::test] + async fn extend_unwatched_detects_already_imported() { sp_tracing::try_init_simple(); let max = 10; let api = Arc::from(TestApi::default()); @@ -683,7 +968,7 @@ mod tx_mem_pool_tests { let mut xts = (0..max - 1).map(|x| Arc::from(uxt(x as _))).collect::>(); xts.push(xts.iter().last().unwrap().clone()); - let results = mempool.extend_unwatched(TransactionSource::External, &xts); + let results = 
mempool.extend_unwatched(TransactionSource::External, &xts).await; assert!(results.iter().take(max - 1).all(Result::is_ok)); assert!(matches!( results.into_iter().last().unwrap().unwrap_err(), @@ -691,33 +976,33 @@ mod tx_mem_pool_tests { )); } - #[test] - fn push_obeys_limit() { + #[tokio::test] + async fn push_obeys_limit() { let max = 10; let api = Arc::from(TestApi::default()); let mempool = TxMemPool::new_test(api, max, usize::MAX); let xts = (0..max).map(|x| Arc::from(uxt(x as _))).collect::>(); - let results = mempool.extend_unwatched(TransactionSource::External, &xts); + let results = mempool.extend_unwatched(TransactionSource::External, &xts).await; assert!(results.iter().all(Result::is_ok)); let xt = Arc::from(uxt(98)); - let result = mempool.push_watched(TransactionSource::External, xt); + let result = mempool.push_watched(TransactionSource::External, xt).await; assert!(matches!( result.unwrap_err(), sc_transaction_pool_api::error::Error::ImmediatelyDropped )); let xt = Arc::from(uxt(99)); - let mut result = mempool.extend_unwatched(TransactionSource::External, &[xt]); + let mut result = mempool.extend_unwatched(TransactionSource::External, &[xt]).await; assert!(matches!( result.pop().unwrap().unwrap_err(), sc_transaction_pool_api::error::Error::ImmediatelyDropped )); } - #[test] - fn push_detects_already_imported() { + #[tokio::test] + async fn push_detects_already_imported() { let max = 10; let api = Arc::from(TestApi::default()); let mempool = TxMemPool::new_test(api, 2 * max, usize::MAX); @@ -726,39 +1011,48 @@ mod tx_mem_pool_tests { let xt0 = xts.iter().last().unwrap().clone(); let xt1 = xts.iter().next().unwrap().clone(); - let results = mempool.extend_unwatched(TransactionSource::External, &xts); + let results = mempool.extend_unwatched(TransactionSource::External, &xts).await; assert!(results.iter().all(Result::is_ok)); - let result = mempool.push_watched(TransactionSource::External, xt0); + let result = 
mempool.push_watched(TransactionSource::External, xt0).await; assert!(matches!( result.unwrap_err(), sc_transaction_pool_api::error::Error::AlreadyImported(_) )); - let mut result = mempool.extend_unwatched(TransactionSource::External, &[xt1]); + let mut result = mempool.extend_unwatched(TransactionSource::External, &[xt1]).await; assert!(matches!( result.pop().unwrap().unwrap_err(), sc_transaction_pool_api::error::Error::AlreadyImported(_) )); } - #[test] - fn count_works() { + #[tokio::test] + async fn count_works() { + sp_tracing::try_init_simple(); + trace!(target:LOG_TARGET,line=line!(),"xxx"); + let max = 100; let api = Arc::from(TestApi::default()); let mempool = TxMemPool::new_test(api, max, usize::MAX); + trace!(target:LOG_TARGET,line=line!(),"xxx"); let xts0 = (0..10).map(|x| Arc::from(uxt(x as _))).collect::>(); + trace!(target:LOG_TARGET,line=line!(),"xxx"); - let results = mempool.extend_unwatched(TransactionSource::External, &xts0); + let results = mempool.extend_unwatched(TransactionSource::External, &xts0).await; + trace!(target:LOG_TARGET,line=line!(),"xxx"); assert!(results.iter().all(Result::is_ok)); + trace!(target:LOG_TARGET,line=line!(),"xxx"); let xts1 = (0..5).map(|x| Arc::from(uxt(2 * x))).collect::>(); - let results = xts1 - .into_iter() - .map(|t| mempool.push_watched(TransactionSource::External, t)) - .collect::>(); + trace!(target:LOG_TARGET,line=line!(),"xxx"); + let results = + xts1.into_iter().map(|t| mempool.push_watched(TransactionSource::External, t)); + trace!(target:LOG_TARGET,line=line!(),"xxx"); + let results = join_all(results).await; + trace!(target:LOG_TARGET,line=line!(),"xxx"); assert!(results.iter().all(Result::is_ok)); - assert_eq!(mempool.unwatched_and_watched_count(), (10, 5)); + assert_eq!(mempool.unwatched_and_watched_count().await, (10, 5)); } /// size of large extrinsic @@ -768,8 +1062,8 @@ mod tx_mem_pool_tests { ExtrinsicBuilder::new_include_data(vec![x as u8; 1024]).build() } - #[test] - fn 
push_obeys_size_limit() { + #[tokio::test] + async fn push_obeys_size_limit() { sp_tracing::try_init_simple(); let max = 10; let api = Arc::from(TestApi::default()); @@ -779,27 +1073,27 @@ mod tx_mem_pool_tests { let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); - let results = mempool.extend_unwatched(TransactionSource::External, &xts); + let results = mempool.extend_unwatched(TransactionSource::External, &xts).await; assert!(results.iter().all(Result::is_ok)); assert_eq!(mempool.bytes(), total_xts_bytes); let xt = Arc::from(large_uxt(98)); - let result = mempool.push_watched(TransactionSource::External, xt); + let result = mempool.push_watched(TransactionSource::External, xt).await; assert!(matches!( result.unwrap_err(), sc_transaction_pool_api::error::Error::ImmediatelyDropped )); let xt = Arc::from(large_uxt(99)); - let mut result = mempool.extend_unwatched(TransactionSource::External, &[xt]); + let mut result = mempool.extend_unwatched(TransactionSource::External, &[xt]).await; assert!(matches!( result.pop().unwrap().unwrap_err(), sc_transaction_pool_api::error::Error::ImmediatelyDropped )); } - #[test] - fn replacing_txs_works_for_same_tx_size() { + #[tokio::test] + async fn replacing_txs_works_for_same_tx_size() { sp_tracing::try_init_simple(); let max = 10; let api = Arc::from(TestApi::default()); @@ -811,7 +1105,7 @@ mod tx_mem_pool_tests { let hi_prio = u64::MAX; let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); - let (submit_outcomes, hashes): (Vec<_>, Vec<_>) = xts + let (submit_outcomes, hashes): (Vec>, Vec<_>) = xts .iter() .map(|t| { let h = api.hash_and_length(t).0; @@ -819,26 +1113,27 @@ mod tx_mem_pool_tests { }) .unzip(); - let results = mempool.extend_unwatched(TransactionSource::External, &xts); + let results = mempool.extend_unwatched(TransactionSource::External, &xts).await; assert!(results.iter().all(Result::is_ok)); assert_eq!(mempool.bytes(), total_xts_bytes); - submit_outcomes - 
.into_iter() - .for_each(|o| mempool.update_transaction_priority(&o)); + for o in submit_outcomes { + mempool.update_transaction_priority(o.hash(), o.priority()).await; + } let xt = Arc::from(large_uxt(98)); let hash = api.hash_and_length(&xt).0; let result = mempool .try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false) + .await .unwrap(); assert_eq!(result.hash, hash); assert_eq!(result.removed, hashes[0..1]); } - #[test] - fn replacing_txs_removes_proper_size_of_txs() { + #[tokio::test] + async fn replacing_txs_removes_proper_size_of_txs() { sp_tracing::try_init_simple(); let max = 10; let api = Arc::from(TestApi::default()); @@ -850,7 +1145,7 @@ mod tx_mem_pool_tests { let hi_prio = u64::MAX; let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); - let (submit_outcomes, hashes): (Vec<_>, Vec<_>) = xts + let (submit_outcomes, hashes): (Vec>, Vec<_>) = xts .iter() .map(|t| { let h = api.hash_and_length(t).0; @@ -858,14 +1153,14 @@ mod tx_mem_pool_tests { }) .unzip(); - let results = mempool.extend_unwatched(TransactionSource::External, &xts); + let results = mempool.extend_unwatched(TransactionSource::External, &xts).await; assert!(results.iter().all(Result::is_ok)); assert_eq!(mempool.bytes(), total_xts_bytes); assert_eq!(total_xts_bytes, max * LARGE_XT_SIZE); - submit_outcomes - .into_iter() - .for_each(|o| mempool.update_transaction_priority(&o)); + for o in submit_outcomes { + mempool.update_transaction_priority(o.hash(), o.priority()).await; + } //this one should drop 2 xts (size: 1130): let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 1025]).build()); @@ -873,14 +1168,15 @@ mod tx_mem_pool_tests { assert_eq!(length, 1130); let result = mempool .try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false) + .await .unwrap(); assert_eq!(result.hash, hash); assert_eq!(result.removed, hashes[0..2]); } - #[test] - fn replacing_txs_removes_proper_size_and_prios() { + 
#[tokio::test] + async fn replacing_txs_removes_proper_size_and_prios() { sp_tracing::try_init_simple(); const COUNT: usize = 10; let api = Arc::from(TestApi::default()); @@ -891,7 +1187,7 @@ mod tx_mem_pool_tests { let hi_prio = u64::MAX; let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); - let (submit_outcomes, hashes): (Vec<_>, Vec<_>) = xts + let (submit_outcomes, hashes): (Vec>, Vec<_>) = xts .iter() .enumerate() .map(|(prio, t)| { @@ -900,13 +1196,13 @@ mod tx_mem_pool_tests { }) .unzip(); - let results = mempool.extend_unwatched(TransactionSource::External, &xts); + let results = mempool.extend_unwatched(TransactionSource::External, &xts).await; assert!(results.iter().all(Result::is_ok)); assert_eq!(mempool.bytes(), total_xts_bytes); - submit_outcomes - .into_iter() - .for_each(|o| mempool.update_transaction_priority(&o)); + for o in submit_outcomes { + mempool.update_transaction_priority(o.hash(), o.priority()).await; + } //this one should drop 3 xts (each of size 1129) let xt = Arc::from(ExtrinsicBuilder::new_include_data(vec![98 as u8; 2154]).build()); @@ -915,14 +1211,15 @@ mod tx_mem_pool_tests { assert_eq!(length, 2 * LARGE_XT_SIZE + 1); let result = mempool .try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false) + .await .unwrap(); assert_eq!(result.hash, hash); assert!(result.removed.iter().eq(hashes[COUNT - 3..COUNT].iter().rev())); } - #[test] - fn replacing_txs_skips_lower_prio_tx() { + #[tokio::test] + async fn replacing_txs_skips_lower_prio_tx() { sp_tracing::try_init_simple(); const COUNT: usize = 10; let api = Arc::from(TestApi::default()); @@ -934,7 +1231,7 @@ mod tx_mem_pool_tests { let low_prio = 10u64; let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); - let submit_outcomes = xts + let submit_outcomes: Vec> = xts .iter() .map(|t| { let h = api.hash_and_length(t).0; @@ -942,17 +1239,18 @@ mod tx_mem_pool_tests { }) .collect::>(); - let results = 
mempool.extend_unwatched(TransactionSource::External, &xts); + let results = mempool.extend_unwatched(TransactionSource::External, &xts).await; assert!(results.iter().all(Result::is_ok)); assert_eq!(mempool.bytes(), total_xts_bytes); - submit_outcomes - .into_iter() - .for_each(|o| mempool.update_transaction_priority(&o)); + for o in submit_outcomes { + mempool.update_transaction_priority(o.hash(), o.priority()).await; + } let xt = Arc::from(large_uxt(98)); - let result = - mempool.try_insert_with_replacement(xt, low_prio, TransactionSource::External, false); + let result = mempool + .try_insert_with_replacement(xt, low_prio, TransactionSource::External, false) + .await; // lower prio tx is rejected immediately assert!(matches!( @@ -961,8 +1259,8 @@ mod tx_mem_pool_tests { )); } - #[test] - fn replacing_txs_is_skipped_if_prios_are_not_set() { + #[tokio::test] + async fn replacing_txs_is_skipped_if_prios_are_not_set() { sp_tracing::try_init_simple(); const COUNT: usize = 10; let api = Arc::from(TestApi::default()); @@ -974,7 +1272,7 @@ mod tx_mem_pool_tests { let total_xts_bytes = xts.iter().fold(0, |r, x| r + api.hash_and_length(&x).1); - let results = mempool.extend_unwatched(TransactionSource::External, &xts); + let results = mempool.extend_unwatched(TransactionSource::External, &xts).await; assert!(results.iter().all(Result::is_ok)); assert_eq!(mempool.bytes(), total_xts_bytes); @@ -984,8 +1282,9 @@ mod tx_mem_pool_tests { // overhead is 105, thus length: 105 + 2154 assert_eq!(length, 2 * LARGE_XT_SIZE + 1); - let result = - mempool.try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false); + let result = mempool + .try_insert_with_replacement(xt, hi_prio, TransactionSource::External, false) + .await; // we did not update priorities (update_transaction_priority was not called): assert!(matches!( diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool/tx_mem_pool_map.rs 
b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool/tx_mem_pool_map.rs new file mode 100644 index 0000000000000..d72e4f114a3b7 --- /dev/null +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool/tx_mem_pool_map.rs @@ -0,0 +1,764 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Provides structures for storing and managing transactions in a transaction memory pool. +//! +//! The module includes `SizeTrackedStore`, a map designed for concurrent access, and +//! `IndexedStorage`, which manages transaction entries by key and priority. Transactions are stored +//! efficiently with operations to insert items based on priority and manage space utilization. This +//! module provides core functionality for maintaining the `TxMemPool` state. + +use std::{ + collections::{BTreeMap, HashMap}, + sync::{ + atomic::{AtomicIsize, Ordering as AtomicOrdering}, + Arc, + }, +}; + +use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; + +/// Something that can report its size. +pub(super) trait Size { + fn size(&self) -> usize; +} + +/// Trait for items with a priority and timestamp ordering. 
+/// +/// `PriorityAndTimestamp` defines methods to access the priority and timestamp, +/// facilitating sorting in structures like `SizeTrackedStore`. +pub(super) trait PriorityAndTimestamp { + type Priority: Ord; + type Timestamp: Ord; + + fn priority(&self) -> Self::Priority; + fn timestamp(&self) -> Self::Timestamp; +} + +/// A dual-key struct for ordering by priority and timestamp. +/// +/// `PriorityKey` allows sorting where the primary criteria is `Priority` +/// and ties are broken using `Timestamp`. +#[derive(Eq, PartialEq, Ord, PartialOrd, Debug)] +pub struct PriorityKey(U, V); + +/// Composite key for sorting collections by priority keys (embedded in `PriorityKey`) and item key +/// (typically hash). +#[derive(Eq, PartialEq, Ord, PartialOrd, Debug)] +struct SortKey(P, K); + +impl SortKey, K> +where + K: Ord + Copy, + A: Ord, + B: Ord, +{ + /// Creates a new `SortKey` for a given item and its key, based on the item's priority and + /// timestamp. + fn new>(key: &K, item: &V) -> Self { + Self(PriorityKey(item.priority(), item.timestamp()), *key) + } +} + +/// Internal storage for managing TxMemPool entries. +/// +/// `IndexedStorage` uses `HashMap` for fast access by key and `BTreeMap` for +/// efficient priority ordering. +#[derive(Debug)] +struct IndexedStorage +where + K: Ord, + S: Ord, +{ + /// HashMap storing transactions by unique key for quick access. + items_by_hashes: HashMap, + /// BTreeMap ordering transactions for prioritized access based on sort key. + items_by_priority: BTreeMap, V>, +} + +/// Core structure for storing and managing transactions in TxMemPool. +/// +/// `SizeTrackedStore` is a thread-safe map designed to track the size and priority +/// of transactions, optimized for use in a transaction memory pool. The map +/// preserves sorting order based on priority and timestamp. +/// +/// Size reported might be slightly off and only approximately true. 
+#[derive(Debug)] +pub struct SizeTrackedStore +where + K: Ord, + S: Ord, +{ + /// Internal storage maintaining transaction entries. + index: Arc>>, + /// Atomic counter tracking the total size, in bytes, of all transactions. + bytes: AtomicIsize, + /// Atomic counter maintaining the count of transactions. + length: AtomicIsize, +} + +impl Default for IndexedStorage +where + K: Ord, + S: Ord, +{ + fn default() -> Self { + Self { items_by_hashes: Default::default(), items_by_priority: Default::default() } + } +} + +impl IndexedStorage +where + K: Ord + std::hash::Hash, + S: Ord, +{ + /// Retrieves a reference to the value corresponding to the key, if present. + pub fn get(&self, key: &K) -> Option<&V> { + self.items_by_hashes.get(key) + } + + /// Checks if the map contains the specified key. + pub fn contains_key(&self, key: &K) -> bool { + self.items_by_hashes.contains_key(key) + } + + /// Returns an iterator over the values in the map. + pub fn values(&self) -> std::collections::hash_map::Values { + self.items_by_hashes.values() + } + + /// Returns the number of elements in the map. + pub fn len(&self) -> usize { + debug_assert_eq!(self.items_by_hashes.len(), self.items_by_priority.len()); + self.items_by_hashes.len() + } + + /// Removes and returns the first entry in the priority map, if it exists (testing only). + #[cfg(test)] + pub fn pop_first(&mut self) -> Option { + self.items_by_priority.pop_first().map(|(_, v)| v) + } + + pub fn with_items(&self, f: F) -> R + where + F: Fn(std::collections::hash_map::Iter) -> R, + { + f(self.items_by_hashes.iter()) + } +} + +impl IndexedStorage, V> +where + K: Ord + std::hash::Hash + Copy, + A: Ord, + B: Ord, + V: Clone + PriorityAndTimestamp, + V: std::cmp::PartialEq + std::fmt::Debug, +{ + /// Inserts a key-value pair into the map, ordering by priority. 
+ pub fn insert(&mut self, key: K, val: V) -> Option { + let r = self.items_by_hashes.insert(key, val.clone()); + + if let Some(ref removed) = r { + let a = self.items_by_priority.remove(&SortKey::new(&key, removed)); + debug_assert_eq!(r, a); + } + let a = self.items_by_priority.insert(SortKey::new(&key, &val), val); + debug_assert!(a.is_none()); + r + } + + /// Removes a key-value pair from the map based on the key. + pub fn remove(&mut self, key: &K) -> Option { + let r = self.items_by_hashes.remove(key); + let _ = r.as_ref().map(|r| { + let k = SortKey::new(key, r); + let a = self.items_by_priority.remove(&k); + debug_assert_eq!(r.clone(), a.expect("item should be in both maps. qed.")); + }); + r + } + + /// Allows to mutate item for given key with a closure, if key exists. + /// + /// Intended to mutate priority and timestamp. Changing size is not possible. + pub fn update_item(&mut self, key: &K, f: F) -> Option<()> + where + F: FnOnce(&mut V), + { + let item = self.items_by_hashes.get_mut(key)?; + + let old_key = SortKey::new(key, item); + f(item); + let new_key = SortKey::new(key, item); + + if old_key != new_key { + self.items_by_priority.remove(&old_key); + self.items_by_priority.insert(new_key, item.clone()); + } + + Some(()) + } +} + +impl IndexedStorage, V> +where + K: Ord + std::hash::Hash + Copy + std::fmt::Debug, + A: Ord + std::fmt::Debug, + B: Ord + std::fmt::Debug, + V: Clone + PriorityAndTimestamp + Size, + V: std::cmp::PartialEq + std::fmt::Debug, +{ + /// Attempts to insert an item with replacement based on free space and priority. + /// Returns the total size in bytes of removed items, and their keys. + /// + /// Insertion always results with other item's removal, the len bound is kept elsewhere + /// + /// If nothing was inserted `(None,0)` is returned. 
+ pub fn try_insert_with_replacement( + &mut self, + free_bytes: usize, + key: K, + item: V, + ) -> (Option>, usize) { + let mut total_size_removed = 0usize; + let mut to_be_removed = vec![]; + + if item.size() == 0 { + return (None, 0); + } + + if self.contains_key(&key) { + return (None, 0); + } + + for (SortKey(PriorityKey(worst_priority, worst_timestamp), worst_key), worst_item) in + &self.items_by_priority + { + if *worst_priority > item.priority() { + return (None, 0); + } + if *worst_priority == item.priority() && *worst_timestamp < item.timestamp() { + return (None, 0); + } + + total_size_removed += worst_item.size(); + to_be_removed.push(*worst_key); + + if free_bytes + total_size_removed >= item.size() { + break; + } + } + + if item.size() > free_bytes + total_size_removed { + return (None, 0); + } + + self.insert(key, item); + + for worst_key in &to_be_removed { + self.remove(worst_key); + } + + (Some(to_be_removed), total_size_removed) + } +} + +impl Default for SizeTrackedStore +where + K: Ord, + S: Ord, +{ + fn default() -> Self { + Self { + index: Arc::new(IndexedStorage::default().into()), + bytes: 0.into(), + length: 0.into(), + } + } +} +// +impl SizeTrackedStore +where + K: Ord, + S: Ord, +{ + /// Current tracked length of the content. + pub fn len(&self) -> usize { + std::cmp::max(self.length.load(AtomicOrdering::Relaxed), 0) as usize + } + + /// Current sum of content length. + pub fn bytes(&self) -> usize { + std::cmp::max(self.bytes.load(AtomicOrdering::Relaxed), 0) as usize + } + + /// Lock map for read. + pub async fn read(&self) -> SizeTrackedStoreReadAccess { + SizeTrackedStoreReadAccess { inner_guard: self.index.read().await } + } + + /// Lock map for write. 
+ pub async fn write(&self) -> SizeTrackedStoreWriteAccess { + SizeTrackedStoreWriteAccess { + inner_guard: self.index.write().await, + bytes: &self.bytes, + length: &self.length, + } + } +} + +pub struct SizeTrackedStoreReadAccess<'a, K, S, V> +where + K: Ord, + S: Ord, +{ + inner_guard: RwLockReadGuard<'a, IndexedStorage>, +} + +impl SizeTrackedStoreReadAccess<'_, K, S, V> +where + K: Ord + std::hash::Hash, + S: Ord, +{ + /// Returns true if the map contains given key. + pub fn contains_key(&self, key: &K) -> bool { + self.inner_guard.contains_key(key) + } + + /// Returns the reference to the contained value by key, if exists. + pub fn get(&self, key: &K) -> Option<&V> { + self.inner_guard.get(key) + } + + /// Returns an iterator over all values. + pub fn values(&self) -> std::collections::hash_map::Values { + self.inner_guard.values() + } + + /// Returns the number of elements in the map. + pub fn len(&self) -> usize { + self.inner_guard.len() + } + + pub fn with_items(&self, f: F) -> R + where + F: Fn(std::collections::hash_map::Iter) -> R, + { + self.inner_guard.with_items(f) + } +} + +pub struct SizeTrackedStoreWriteAccess<'a, K, S, V> +where + K: Ord, + S: Ord, +{ + bytes: &'a AtomicIsize, + length: &'a AtomicIsize, + inner_guard: RwLockWriteGuard<'a, IndexedStorage>, +} + +impl SizeTrackedStoreWriteAccess<'_, K, PriorityKey, V> +where + K: Ord + std::hash::Hash + Copy + std::fmt::Debug, + A: Ord + std::fmt::Debug, + B: Ord + std::fmt::Debug, + V: Clone + PriorityAndTimestamp + Size, + V: std::cmp::PartialEq + std::fmt::Debug, +{ + /// Insert value and return previous (if any). 
+ pub fn insert(&mut self, key: K, val: V) -> Option { + let new_bytes = val.size(); + self.bytes.fetch_add(new_bytes as isize, AtomicOrdering::Relaxed); + self.length.fetch_add(1, AtomicOrdering::Relaxed); + self.inner_guard.insert(key, val).inspect(|old_val| { + self.bytes.fetch_sub(old_val.size() as isize, AtomicOrdering::Relaxed); + self.length.fetch_sub(1, AtomicOrdering::Relaxed); + }) + } + + /// Remove value by key. + pub fn remove(&mut self, key: &K) -> Option { + let val = self.inner_guard.remove(key); + if let Some(size) = val.as_ref().map(Size::size) { + self.bytes.fetch_sub(size as isize, AtomicOrdering::Relaxed); + self.length.fetch_sub(1, AtomicOrdering::Relaxed); + } + val + } + + /// Refer to [`IndexedStorage::try_insert_with_replacement`] + pub fn try_insert_with_replacement( + &mut self, + max_total_bytes: usize, + key: K, + item: V, + ) -> Option> { + let item_size = item.size(); + let current_bytes = std::cmp::max(self.bytes.load(AtomicOrdering::Relaxed), 0) as usize; + let free_bytes = max_total_bytes - current_bytes; + let (removed_keys, removed_bytes) = + self.inner_guard.try_insert_with_replacement(free_bytes, key, item); + + if let Some(ref removed_keys) = removed_keys { + let delta = item_size as isize - removed_bytes as isize; + self.bytes.fetch_add(delta, AtomicOrdering::Relaxed); + self.length.fetch_sub(removed_keys.len() as isize, AtomicOrdering::Relaxed); + self.length.fetch_add(1, AtomicOrdering::Relaxed); + } + removed_keys + } + + /// Allows to mutate item for given key, if exists. + /// + /// Intended to mutate priority and timestamp. Changing size is not possible. + pub fn update_item(&mut self, hash: &K, f: F) -> Option<()> + where + F: FnOnce(&mut V), + { + self.inner_guard.update_item(hash, f) + } +} + +impl SizeTrackedStoreWriteAccess<'_, K, S, V> +where + K: Ord + std::hash::Hash, + S: Ord, +{ + /// Returns `true` if the inner map contains a value for the specified key. 
+ pub fn contains_key(&self, key: &K) -> bool { + self.inner_guard.contains_key(key) + } + + /// Returns the number of elements in the map. + pub fn len(&mut self) -> usize { + self.inner_guard.len() + } + + #[cfg(test)] + pub fn pop_first(&mut self) -> Option { + self.inner_guard.pop_first() + } +} + +#[cfg(test)] +mod tests { + use rstest::rstest; + + use super::*; + + #[derive(Clone, Debug, PartialEq)] + struct TestItem { + size: usize, + prio: u32, + ts: u32, + } + + impl PriorityAndTimestamp for TestItem { + type Priority = u32; + type Timestamp = u32; + + fn priority(&self) -> Self::Priority { + self.prio + } + fn timestamp(&self) -> Self::Timestamp { + self.ts + } + } + + impl Size for TestItem { + fn size(&self) -> usize { + self.size + } + } + + impl TestItem { + fn new(prio: u32, ts: u32, size: usize) -> Self { + Self { prio, ts, size } + } + } + + #[tokio::test] + async fn basic() { + let map = SizeTrackedStore::default(); + + let i0 = TestItem::new(1, 0, 10); + let i1 = TestItem::new(2, 0, 11); + let i2 = TestItem::new(3, 0, 20); + + map.write().await.insert(0xa, i0); + map.write().await.insert(0xb, i1); + + assert_eq!(map.bytes(), 21); + assert_eq!(map.len(), 2); + + map.write().await.insert(0xc, i2); + + assert_eq!(map.bytes(), 41); + assert_eq!(map.len(), 3); + + map.write().await.remove(&0xa); + assert_eq!(map.bytes(), 31); + assert_eq!(map.len(), 2); + assert_eq!(map.read().await.len(), 2); + } + + #[tokio::test] + async fn insert_same_hash() { + let map = SizeTrackedStore::default(); + + let i0 = TestItem::new(1, 0, 10); + let i1 = TestItem::new(2, 0, 11); + let i2 = TestItem::new(3, 0, 20); + let i3 = TestItem::new(4, 0, 40); + let i4 = TestItem::new(5, 0, 50); + let i5 = TestItem::new(6, 0, 1); + + map.write().await.insert(0xa, i0); + map.write().await.insert(0xb, i1.clone()); + map.write().await.insert(0xc, i2); + assert_eq!(map.bytes(), 41); + assert_eq!(map.len(), 3); + assert_eq!(map.read().await.len(), 3); + + map.write().await.insert(0xc, 
i3.clone()); + assert_eq!(map.bytes(), 61); + assert_eq!(map.len(), 3); + assert_eq!(map.read().await.len(), 3); + + map.write().await.insert(0xa, i4); + assert_eq!(map.bytes(), 101); + assert_eq!(map.len(), 3); + assert_eq!(map.read().await.len(), 3); + + map.write().await.insert(0xa, i5.clone()); + assert_eq!(map.bytes(), 52); + assert_eq!(map.len(), 3); + assert_eq!(map.read().await.len(), 3); + + let items = map.read().await.values().cloned().collect::>(); + let expected = [i1, i3, i5]; + assert!(expected.iter().all(|e| items.contains(e)),); + assert_eq!(items.len(), expected.len()); + } + + #[tokio::test] + async fn remove_non_existent() { + let map = SizeTrackedStore::<_, _, TestItem>::default(); + map.write().await.remove(&0xa); + assert_eq!(map.bytes(), 0); + assert_eq!(map.len(), 0); + assert_eq!(map.read().await.len(), 0); + } + + #[rstest] + #[case(20, 30, 50, 50, 100, 3, 100, 2, 100)] + #[case(2, 46, 50, 3, 100, 3, 98, 3, 99)] + #[case(2, 46, 50, 2, 100, 3, 98, 3, 98)] + #[case(2, 47, 50, 4, 100, 3, 99, 2, 54)] + #[case(1, 47, 50, 99, 100, 3, 98, 1, 99)] + #[case(1, 1, 1, 2, 100, 3, 3, 3, 4)] //always remove + #[case(20, 30, 40, 150, 100, 3, 90, 3, 90)] + #[case(10, 20, 30, 80, 100, 3, 60, 1, 80)] + #[tokio::test] + #[allow(clippy::too_many_arguments)] + async fn try_insert_with_replacement_works_param2( + #[case] i0_bytes: usize, + #[case] i1_bytes: usize, + #[case] i2_bytes: usize, + #[case] i3_bytes: usize, + #[case] max_bytes: usize, + #[case] expected_len_before: usize, + #[case] expected_bytes_before: usize, + #[case] expected_len_after: usize, + #[case] expected_bytes_after: usize, + ) { + let map = SizeTrackedStore::default(); + + let i0 = TestItem::new(1, 0, i0_bytes); + let i1 = TestItem::new(2, 0, i1_bytes); + let i2 = TestItem::new(3, 0, i2_bytes); + + map.write().await.insert(0xa, i0); + map.write().await.insert(0xb, i1); + map.write().await.insert(0xc, i2.clone()); + + assert_eq!(map.bytes(), expected_bytes_before); + assert_eq!(map.len(), 
expected_len_before); + + let i3 = TestItem::new(4, 0, i3_bytes); + map.write().await.try_insert_with_replacement(max_bytes, 0xd, i3.clone()); + + assert_eq!(map.len(), expected_len_after); + assert_eq!(map.read().await.len(), expected_len_after); + assert_eq!(map.bytes(), expected_bytes_after); + } + + #[tokio::test] + async fn try_insert_with_replacement_items() { + let map = SizeTrackedStore::default(); + + let i0 = TestItem::new(1, 0, 20); + let i1 = TestItem::new(2, 0, 30); + let i2 = TestItem::new(3, 0, 50); + + map.write().await.insert(0xa, i0); + map.write().await.insert(0xb, i1); + map.write().await.insert(0xc, i2.clone()); + + assert_eq!(map.bytes(), 100); + assert_eq!(map.len(), 3); + + let i3 = TestItem::new(4, 0, 50); + let _ = map.write().await.try_insert_with_replacement(100, 0xd, i3.clone()); + + assert_eq!(map.bytes(), 100); + assert_eq!(map.len(), 2); + assert_eq!(map.read().await.len(), 2); + + let items = map.read().await.values().cloned().collect::>(); + let expected = [&i2, &i3]; + assert!(expected.iter().all(|e| items.contains(e)),); + assert_eq!(items.len(), expected.len()); + + assert_eq!(map.write().await.pop_first().unwrap(), i2); + assert_eq!(map.write().await.pop_first().unwrap(), i3); + } + + #[tokio::test] + async fn try_insert_with_replacement_works_known_key_reject() { + let map = SizeTrackedStore::default(); + + let i0 = TestItem::new(1, 0, 20); + let i1 = TestItem::new(2, 0, 30); + let i2 = TestItem::new(3, 0, 50); + + map.write().await.insert(0xa, i0.clone()); + map.write().await.insert(0xb, i1.clone()); + map.write().await.insert(0xc, i2.clone()); + + assert_eq!(map.bytes(), 100); + assert_eq!(map.len(), 3); + + let i3 = TestItem::new(4, 0, 50); + let r = map.write().await.try_insert_with_replacement(100, 0xb, i3); + + assert!(r.is_none()); + + assert_eq!(map.bytes(), 100); + assert_eq!(map.len(), 3); + assert_eq!(map.read().await.len(), 3); + + let items = map.read().await.values().cloned().collect::>(); + let expected = [&i0, 
&i1, &i2]; + assert!(expected.iter().all(|e| items.contains(e)),); + assert_eq!(items.len(), expected.len()); + assert_eq!(map.write().await.pop_first().unwrap(), i0); + assert_eq!(map.write().await.pop_first().unwrap(), i1); + assert_eq!(map.write().await.pop_first().unwrap(), i2); + } + + #[tokio::test] + async fn try_insert_with_replacement_zero_size_reject() { + let map = SizeTrackedStore::default(); + + let i0 = TestItem::new(1, 0, 20); + let i1 = TestItem::new(2, 0, 30); + let i2 = TestItem::new(3, 0, 50); + + map.write().await.insert(0xa, i0.clone()); + map.write().await.insert(0xb, i1.clone()); + map.write().await.insert(0xc, i2.clone()); + + assert_eq!(map.bytes(), 100); + assert_eq!(map.len(), 3); + + let i3 = TestItem::new(4, 0, 0); + let r = map.write().await.try_insert_with_replacement(100, 0xb, i3); + + assert!(r.is_none()); + + assert_eq!(map.bytes(), 100); + assert_eq!(map.len(), 3); + assert_eq!(map.read().await.len(), 3); + + let items = map.read().await.values().cloned().collect::>(); + let expected = [i0, i1, i2]; + assert!(expected.iter().all(|e| items.contains(e)),); + assert_eq!(items.len(), expected.len()); + } + + #[tokio::test] + async fn sorting_works() { + let map = SizeTrackedStore::default(); + + let i0 = TestItem::new(1, 0, 10); + let i1 = TestItem::new(1, 1, 10); + let i2 = TestItem::new(2, 0, 20); + let i3 = TestItem::new(3, 0, 30); + let i4 = TestItem::new(4, 0, 40); + + map.write().await.insert(0xc, i2.clone()); + map.write().await.insert(0xb, i1.clone()); + map.write().await.insert(0xa, i0.clone()); + map.write().await.insert(0xe, i4.clone()); + map.write().await.insert(0xd, i3.clone()); + + assert_eq!(map.bytes(), 110); + assert_eq!(map.len(), 5); + assert_eq!(map.write().await.pop_first().unwrap(), i0); + assert_eq!(map.write().await.pop_first().unwrap(), i1); + assert_eq!(map.write().await.pop_first().unwrap(), i2); + assert_eq!(map.write().await.pop_first().unwrap(), i3); + assert_eq!(map.write().await.pop_first().unwrap(), 
i4); + } + + #[tokio::test] + async fn update_item() { + let map = SizeTrackedStore::default(); + + let i0 = TestItem::new(1, 0, 20); + let i1 = TestItem::new(2, 0, 30); + let iu = TestItem::new(0, 0, 30); + let i2 = TestItem::new(3, 0, 50); + + map.write().await.insert(0xa, i0.clone()); + map.write().await.insert(0xb, i1.clone()); + map.write().await.insert(0xc, i2.clone()); + + assert_eq!(map.bytes(), 100); + assert_eq!(map.len(), 3); + assert_eq!(map.read().await.len(), 3); + + map.write().await.update_item(&0xb, |item| item.prio = iu.prio).unwrap(); + + assert_eq!(map.bytes(), 100); + assert_eq!(map.len(), 3); + assert_eq!(map.read().await.len(), 3); + + let items = map.read().await.values().cloned().collect::>(); + let expected = [&i0, &iu, &i2]; + assert!(expected.iter().all(|e| items.contains(e)),); + assert_eq!(items.len(), expected.len()); + + assert_eq!(map.write().await.pop_first().unwrap(), iu); + assert_eq!(map.write().await.pop_first().unwrap(), i0); + assert_eq!(map.write().await.pop_first().unwrap(), i2); + } +} diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs index 5c025b22573a9..c3854f9a58d78 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view.rs @@ -28,8 +28,8 @@ use crate::{ common::tracing_log_xt::log_xt_trace, graph::{ self, base_pool::TimedTransactionSource, BlockHash, ExtrinsicFor, ExtrinsicHash, - IsValidator, TransactionFor, ValidatedPoolSubmitOutcome, ValidatedTransaction, - ValidatedTransactionFor, + IsValidator, TransactionFor, ValidateTransactionPriority, ValidatedPoolSubmitOutcome, + ValidatedTransaction, ValidatedTransactionFor, }, LOG_TARGET, }; @@ -43,7 +43,7 @@ use sp_runtime::{ SaturatedConversion, }; use std::{sync::Arc, time::Instant}; -use tracing::{debug, trace}; +use tracing::{debug, instrument, trace, Level}; pub(super) struct 
RevalidationResult { revalidated: IndexMap, ValidatedTransactionFor>, @@ -311,21 +311,25 @@ where } /// Imports single unvalidated extrinsic into the view. + #[instrument(level = Level::TRACE, skip_all, target = "txpool", name = "view::submit_one")] pub(super) async fn submit_one( &self, source: TimedTransactionSource, xt: ExtrinsicFor, + validation_priority: ValidateTransactionPriority, ) -> Result, ChainApi::Error> { - self.submit_many(std::iter::once((source, xt))) + self.submit_many(std::iter::once((source, xt)), validation_priority) .await .pop() .expect("There is exactly one result, qed.") } /// Imports many unvalidated extrinsics into the view. + #[instrument(level = Level::TRACE, skip_all, target = "txpool", name = "view::submit_many")] pub(super) async fn submit_many( &self, xts: impl IntoIterator)>, + validation_priority: ValidateTransactionPriority, ) -> Vec, ChainApi::Error>> { if tracing::enabled!(target: LOG_TARGET, tracing::Level::TRACE) { let xts = xts.into_iter().collect::>(); @@ -335,9 +339,9 @@ where "view::submit_many at:{}", self.at.hash ); - self.pool.submit_at(&self.at, xts).await + self.pool.submit_at(&self.at, xts, validation_priority).await } else { - self.pool.submit_at(&self.at, xts).await + self.pool.submit_at(&self.at, xts, validation_priority).await } } @@ -413,7 +417,7 @@ where revalidation_result_tx, } = finish_revalidation_worker_channels; - trace!( + debug!( target: LOG_TARGET, at_hash = ?self.at.hash, "view::revalidate: at starting" @@ -438,7 +442,7 @@ where let mut should_break = false; tokio::select! 
{ _ = finish_revalidation_request_rx.recv() => { - trace!( + debug!( target: LOG_TARGET, at_hash = ?self.at.hash, "view::revalidate: finish revalidation request received" @@ -447,7 +451,13 @@ where } _ = async { if let Some(tx) = batch_iter.next() { - let validation_result = (api.validate_transaction(self.at.hash, tx.source.clone().into(), tx.data.clone()).await, tx.hash, tx); + let validation_result = ( + api.validate_transaction(self.at.hash, + tx.source.clone().into(), tx.data.clone(), + ValidateTransactionPriority::Maintained).await, + tx.hash, + tx + ); validation_results.push(validation_result); } else { self.revalidation_worker_channels.lock().as_mut().map(|ch| ch.remove_sender()); @@ -518,7 +528,7 @@ where } } - trace!( + debug!( target: LOG_TARGET, at_hash = ?self.at.hash, "view::revalidate: sending revalidation result" @@ -550,7 +560,7 @@ where super::revalidation_worker::RevalidationQueue, >, ) { - trace!( + debug!( target: LOG_TARGET, at_hash = ?view.at.hash, "view::start_background_revalidation" diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs index bcf1e8d5ceef9..91ef881a397c6 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/view_store.rs @@ -30,7 +30,7 @@ use crate::{ BaseSubmitOutcome, BlockHash, ExtrinsicFor, ExtrinsicHash, TransactionFor, ValidatedPoolSubmitOutcome, }, - ReadyIteratorFor, LOG_TARGET, + ReadyIteratorFor, ValidateTransactionPriority, LOG_TARGET, }; use itertools::Itertools; use parking_lot::RwLock; @@ -48,7 +48,7 @@ use std::{ sync::Arc, time::Instant, }; -use tracing::{trace, warn}; +use tracing::{debug, instrument, trace, warn, Level}; /// Helper struct to maintain the context for pending transaction submission, executed for /// newly inserted views. 
@@ -166,9 +166,9 @@ where /// /// Provides a side-channel allowing to send per-transaction state changes notification. pub(super) listener: Arc>, - /// Most recent block processed by tx-pool. Used in the API functions that were not changed to + /// Most recent view processed by tx-pool. Used in the API functions that were not changed to /// add `at` parameter. - pub(super) most_recent_view: RwLock>, + pub(super) most_recent_view: RwLock>>>, /// The controller of multi view dropped stream. pub(super) dropped_stream_controller: MultiViewDroppedWatcherController, /// The map used to synchronize replacement of transactions between maintain and dropped @@ -229,7 +229,7 @@ where async move { ( view.at.hash, - view.submit_many(xts) + view.submit_many(xts, ValidateTransactionPriority::Submitted) .await .into_iter() .map(|r| r.map(Into::into)) @@ -285,6 +285,7 @@ where /// /// The external stream of aggregated/processed events provided by the `MultiViewListener` /// instance is returned. + #[instrument(level = Level::TRACE, skip_all, target = "txpool", name = "view_store::sumbit_and_watch")] pub(super) async fn submit_and_watch( &self, _at: Block::Hash, @@ -303,7 +304,9 @@ where let view = view.clone(); let xt = xt.clone(); let source = source.clone(); - async move { view.submit_one(source, xt).await } + async move { + view.submit_one(source, xt, ValidateTransactionPriority::Submitted).await + } }) .collect::>() }; @@ -411,12 +414,8 @@ where /// The iterator for future transactions is returned if the most recently notified best block, /// for which maintain process was accomplished, exists. 
pub(super) fn ready(&self) -> ReadyIteratorFor { - let ready_iterator = self - .most_recent_view - .read() - .map(|at| self.get_view_at(at, true)) - .flatten() - .map(|(v, _)| v.pool.validated_pool().ready()); + let ready_iterator = + self.most_recent_view.read().as_ref().map(|v| v.pool.validated_pool().ready()); if let Some(ready_iterator) = ready_iterator { return Box::new(ready_iterator) @@ -434,8 +433,8 @@ where ) -> Vec, ExtrinsicFor>> { self.most_recent_view .read() - .map(|at| self.futures_at(at)) - .flatten() + .as_ref() + .and_then(|view| self.futures_at(view.at.hash)) .unwrap_or_default() } @@ -459,7 +458,7 @@ where finalized_hash: Block::Hash, tree_route: &[Block::Hash], ) -> Vec> { - trace!( + debug!( target: LOG_TARGET, ?finalized_hash, ?tree_route, @@ -493,6 +492,10 @@ where finalized_transactions.extend(extrinsics); } + debug!( + target: LOG_TARGET, + "finalize_route: done" + ); finalized_transactions } @@ -521,6 +524,7 @@ where /// The `most_recent_view` is updated with the reference to the newly inserted view. /// /// If there are any pending tx replacments, they are applied to the new view. + #[instrument(level = Level::TRACE, skip_all, target = "txpool", name = "view_store::insert_new_view")] pub(super) async fn insert_new_view( &self, view: Arc>, @@ -528,6 +532,8 @@ where ) { self.apply_pending_tx_replacements(view.clone()).await; + let start = Instant::now(); + //note: most_recent_view must be synced with changes in in/active_views. 
{ let mut most_recent_view_lock = self.most_recent_view.write(); @@ -543,11 +549,12 @@ where }); }); active_views.insert(view.at.hash, view.clone()); - most_recent_view_lock.replace(view.at.hash); + most_recent_view_lock.replace(view.clone()); }; - trace!( + debug!( target: LOG_TARGET, inactive_views = ?self.inactive_views.read().keys(), + duration = ?start.elapsed(), "insert_new_view" ); } @@ -622,19 +629,14 @@ where retain }); - trace!( + debug!( target: LOG_TARGET, inactive_views = ?inactive_views.keys(), + ?dropped_views, "handle_finalized" ); } - trace!( - target: LOG_TARGET, - ?dropped_views, - "handle_finalized" - ); - self.listener.remove_stale_controllers(); self.dropped_stream_controller.remove_transactions(finalized_xts.clone()); @@ -663,11 +665,16 @@ where }) .collect::>() }; + debug!( + target: LOG_TARGET, + duration = ?start.elapsed(), + "finish_background_revalidations before" + ); futures::future::join_all(finish_revalidation_futures).await; - trace!( + debug!( target: LOG_TARGET, duration = ?start.elapsed(), - "finish_background_revalidations" + "finish_background_revalidations after" ); } @@ -777,11 +784,17 @@ where /// Applies pending transaction replacements to the specified view. /// /// After application, all already processed replacements are removed. 
+ #[instrument(level = Level::TRACE, skip_all, target = "txpool", name = "view_store::apply_pending_tx_replacements")] async fn apply_pending_tx_replacements(&self, view: Arc>) { + let start = Instant::now(); let mut futures = vec![]; + let mut replace_count = 0; + let mut remove_count = 0; + for replacement in self.pending_txs_tasks.read().values() { match replacement.action { PreInsertAction::SubmitTx(ref submission) => { + replace_count += 1; let xt_hash = self.api.hash_and_length(&submission.xt).0; futures.push(self.replace_transaction_in_view( view.clone(), @@ -791,12 +804,22 @@ where )); }, PreInsertAction::RemoveSubtree(ref removal) => { + remove_count += 1; view.remove_subtree(&[removal.xt_hash], true, &*removal.listener_action); }, } } - let _results = futures::future::join_all(futures).await; - self.pending_txs_tasks.write().retain(|_, r| r.processed); + let _ = futures::future::join_all(futures).await; + self.pending_txs_tasks.write().retain(|_, r| !r.processed); + debug!( + target: LOG_TARGET, + at_hash = ?view.at.hash, + replace_count, + remove_count, + duration = ?start.elapsed(), + count = ?self.pending_txs_tasks.read().len(), + "apply_pending_tx_replacements" + ); } /// Submits `xt` to the given view. 
@@ -809,7 +832,9 @@ where xt: ExtrinsicFor, tx_hash: ExtrinsicHash, ) { - if let Err(error) = view.submit_one(source, xt).await { + if let Err(error) = + view.submit_one(source, xt, ValidateTransactionPriority::Maintained).await + { trace!( target: LOG_TARGET, ?tx_hash, diff --git a/substrate/client/transaction-pool/src/graph/mod.rs b/substrate/client/transaction-pool/src/graph/mod.rs index 3e6a63babc99c..c4882ab7bc61d 100644 --- a/substrate/client/transaction-pool/src/graph/mod.rs +++ b/substrate/client/transaction-pool/src/graph/mod.rs @@ -39,7 +39,7 @@ pub mod watcher; pub use self::pool::{ BlockHash, ChainApi, ExtrinsicFor, ExtrinsicHash, NumberFor, Options, Pool, RawExtrinsicFor, - TransactionFor, ValidatedTransactionFor, + TransactionFor, ValidateTransactionPriority, ValidatedTransactionFor, }; pub use validated_pool::{ BaseSubmitOutcome, EventDispatcher, IsValidator, ValidatedPoolSubmitOutcome, diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index 6aa695e980da0..d6cbf7ccdef64 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -33,7 +33,7 @@ use std::{ sync::Arc, time::{Duration, Instant}, }; -use tracing::{debug, trace}; +use tracing::{debug, instrument, trace, Level}; use super::{ base_pool as base, @@ -60,6 +60,19 @@ pub type TransactionFor = Arc, ExtrinsicFo pub type ValidatedTransactionFor = ValidatedTransaction, ExtrinsicFor, ::Error>; +/// The priority of request to validate the transaction. +#[derive(PartialEq, Copy, Clone)] +pub enum ValidateTransactionPriority { + /// Validate the newly submitted transactions + /// + /// Validation will be done with lower priority. + Submitted, + /// Validate the transaction during maintenance process. + /// + /// Validation will be performed with higher priority. + Maintained, +} + /// Concrete extrinsic validation and query logic. 
pub trait ChainApi: Send + Sync { /// Block type. @@ -80,6 +93,7 @@ pub trait ChainApi: Send + Sync { at: ::Hash, source: TransactionSource, uxt: ExtrinsicFor, + validation_priority: ValidateTransactionPriority, ) -> Self::ValidationFuture; /// Synchronously verify given extrinsic at given block. @@ -218,12 +232,15 @@ impl> Pool { } /// Imports a bunch of unverified extrinsics to the pool + #[instrument(level = Level::TRACE, skip_all, target="txpool", name = "pool::submit_at")] pub async fn submit_at( &self, at: &HashAndNumber, xts: impl IntoIterator)>, + validation_priority: ValidateTransactionPriority, ) -> Vec, B::Error>> { - let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await; + let validated_transactions = + self.verify(at, xts, CheckBannedBeforeVerify::Yes, validation_priority).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -234,8 +251,10 @@ impl> Pool { &self, at: &HashAndNumber, xts: impl IntoIterator)>, + validation_priority: ValidateTransactionPriority, ) -> Vec, B::Error>> { - let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await; + let validated_transactions = + self.verify(at, xts, CheckBannedBeforeVerify::No, validation_priority).await; self.validated_pool.submit(validated_transactions.into_values()) } @@ -246,7 +265,10 @@ impl> Pool { source: base::TimedTransactionSource, xt: ExtrinsicFor, ) -> Result, B::Error> { - let res = self.submit_at(at, std::iter::once((source, xt))).await.pop(); + let res = self + .submit_at(at, std::iter::once((source, xt)), ValidateTransactionPriority::Submitted) + .await + .pop(); res.expect("One extrinsic passed; one result returned; qed") } @@ -258,7 +280,14 @@ impl> Pool { xt: ExtrinsicFor, ) -> Result, B::Error> { let (_, tx) = self - .verify_one(at.hash, at.number, source, xt, CheckBannedBeforeVerify::Yes) + .verify_one( + at.hash, + at.number, + source, + xt, + CheckBannedBeforeVerify::Yes, + 
ValidateTransactionPriority::Submitted, + ) .await; self.validated_pool.submit_and_watch(tx) } @@ -357,6 +386,7 @@ impl> Pool { parent, TransactionSource::InBlock, Arc::from(extrinsic.clone()), + ValidateTransactionPriority::Maintained, ) .await; @@ -436,8 +466,14 @@ impl> Pool { let pruned_transactions = prune_status.pruned.into_iter().map(|tx| (tx.source.clone(), tx.data.clone())); - let reverified_transactions = - self.verify(at, pruned_transactions, CheckBannedBeforeVerify::Yes).await; + let reverified_transactions = self + .verify( + at, + pruned_transactions, + CheckBannedBeforeVerify::Yes, + ValidateTransactionPriority::Maintained, + ) + .await; let pruned_hashes = reverified_transactions.keys().map(Clone::clone).collect::>(); debug!( @@ -464,18 +500,19 @@ impl> Pool { } /// Returns future that validates a bunch of transactions at given block. + #[instrument(level = Level::TRACE, skip_all, target = "txpool",name = "pool::verify")] async fn verify( &self, at: &HashAndNumber, xts: impl IntoIterator)>, check: CheckBannedBeforeVerify, + validation_priority: ValidateTransactionPriority, ) -> IndexMap, ValidatedTransactionFor> { let HashAndNumber { number, hash } = *at; - let res = futures::future::join_all( - xts.into_iter() - .map(|(source, xt)| self.verify_one(hash, number, source, xt, check)), - ) + let res = futures::future::join_all(xts.into_iter().map(|(source, xt)| { + self.verify_one(hash, number, source, xt, check, validation_priority) + })) .await .into_iter() .collect::>(); @@ -484,6 +521,7 @@ impl> Pool { } /// Returns future that validates single transaction at given block. 
+ #[instrument(level = Level::TRACE, skip_all, target = "txpool",name = "pool::verify_one")] pub(crate) async fn verify_one( &self, block_hash: ::Hash, @@ -491,6 +529,7 @@ impl> Pool { source: base::TimedTransactionSource, xt: ExtrinsicFor, check: CheckBannedBeforeVerify, + validation_priority: ValidateTransactionPriority, ) -> (ExtrinsicHash, ValidatedTransactionFor) { let (hash, bytes) = self.validated_pool.api().hash_and_length(&xt); @@ -502,7 +541,12 @@ impl> Pool { let validation_result = self .validated_pool .api() - .validate_transaction(block_hash, source.clone().into(), xt.clone()) + .validate_transaction( + block_hash, + source.clone().into(), + xt.clone(), + validation_priority, + ) .await; let status = match validation_result { @@ -623,10 +667,14 @@ mod tests { // when let txs = txs.into_iter().map(|x| (SOURCE, Arc::from(x))).collect::>(); - let hashes = block_on(pool.submit_at(&api.expect_hash_and_number(0), txs)) - .into_iter() - .map(|r| r.map(|o| o.hash())) - .collect::>(); + let hashes = block_on(pool.submit_at( + &api.expect_hash_and_number(0), + txs, + ValidateTransactionPriority::Submitted, + )) + .into_iter() + .map(|r| r.map(|o| o.hash())) + .collect::>(); debug!(hashes = ?hashes, "-->"); // then diff --git a/substrate/client/transaction-pool/src/graph/tracked_map.rs b/substrate/client/transaction-pool/src/graph/tracked_map.rs index ca1ee035cf37e..e9bdd1fdae05e 100644 --- a/substrate/client/transaction-pool/src/graph/tracked_map.rs +++ b/substrate/client/transaction-pool/src/graph/tracked_map.rs @@ -18,7 +18,7 @@ use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; use std::{ - collections::{hash_map::Iter, HashMap}, + collections::HashMap, sync::{ atomic::{AtomicIsize, Ordering as AtomicOrdering}, Arc, @@ -115,11 +115,6 @@ where pub fn values(&self) -> std::collections::hash_map::Values { self.inner_guard.values() } - - /// Returns the number of elements in the map. 
- pub fn len(&self) -> usize { - self.inner_guard.len() - } } pub struct TrackedMapWriteAccess<'a, K, V> { @@ -154,25 +149,10 @@ where val } - /// Returns `true` if the inner map contains a value for the specified key. - pub fn contains_key(&self, key: &K) -> bool { - self.inner_guard.contains_key(key) - } - /// Returns mutable reference to the contained value by key, if exists. pub fn get_mut(&mut self, key: &K) -> Option<&mut V> { self.inner_guard.get_mut(key) } - - /// Returns the number of elements in the map. - pub fn len(&mut self) -> usize { - self.inner_guard.len() - } - - /// Returns an iterator over all key-value pairs. - pub fn iter(&self) -> Iter<'_, K, V> { - self.inner_guard.iter() - } } #[cfg(test)] diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index caccc3370a0aa..e4e33111a1006 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -16,12 +16,12 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::{ - collections::{HashMap, HashSet}, - sync::Arc, +use crate::{ + common::{ + sliding_stat::SyncDurationSlidingStats, tracing_log_xt::log_xt_trace, STAT_SLIDING_WINDOW, + }, + insert_and_log_throttled_sync, LOG_TARGET, }; - -use crate::{common::tracing_log_xt::log_xt_trace, LOG_TARGET}; use futures::channel::mpsc::{channel, Sender}; use indexmap::IndexMap; use parking_lot::{Mutex, RwLock}; @@ -31,8 +31,12 @@ use sp_runtime::{ traits::SaturatedConversion, transaction_validity::{TransactionTag as Tag, ValidTransaction}, }; -use std::time::Instant; -use tracing::{debug, trace, warn}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + time::{Duration, Instant}, +}; +use tracing::{trace, warn, Level}; use super::{ base_pool::{self as base, PruneStatus}, @@ -165,6 +169,7 @@ pub struct ValidatedPool> { pub(crate) pool: RwLock, ExtrinsicFor>>, import_notification_sinks: Mutex>>>, rotator: PoolRotator>, + enforce_limits_stats: SyncDurationSlidingStats, } impl> Clone for ValidatedPool { @@ -177,6 +182,7 @@ impl> Clone for ValidatedPool { pool: RwLock::from(self.pool.read().clone()), import_notification_sinks: Default::default(), rotator: self.rotator.clone(), + enforce_limits_stats: self.enforce_limits_stats.clone(), } } } @@ -248,6 +254,9 @@ impl> ValidatedPool { pool: RwLock::new(base_pool), import_notification_sinks: Default::default(), rotator, + enforce_limits_stats: SyncDurationSlidingStats::new(Duration::from_secs( + STAT_SLIDING_WINDOW, + )), } } @@ -292,7 +301,16 @@ impl> ValidatedPool { // only enforce limits if there is at least one imported transaction let removed = if results.iter().any(|res| res.is_ok()) { - self.enforce_limits() + let start = Instant::now(); + let removed = self.enforce_limits(); + insert_and_log_throttled_sync!( + Level::DEBUG, + target:"txpool", + prefix:"enforce_limits_stats", + self.enforce_limits_stats, + start.elapsed().into() + ); + removed } else { Default::default() }; @@ -379,7 +397,7 @@ impl> ValidatedPool { if 
ready_limit.is_exceeded(status.ready, status.ready_bytes) || future_limit.is_exceeded(status.future, status.future_bytes) { - debug!( + trace!( target: LOG_TARGET, ready_count = ready_limit.count, ready_kb = ready_limit.total_bytes / 1024, diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs index 366d91a973d27..cd788b6948720 100644 --- a/substrate/client/transaction-pool/src/lib.rs +++ b/substrate/client/transaction-pool/src/lib.rs @@ -38,7 +38,7 @@ pub use common::notification_future; pub use fork_aware_txpool::{ForkAwareTxPool, ForkAwareTxPoolTask}; pub use graph::{ base_pool::{Limit as PoolLimit, TimedTransactionSource}, - ChainApi, Options, Pool, + ChainApi, Options, Pool, ValidateTransactionPriority, }; use single_state_txpool::prune_known_txs_for_block; pub use single_state_txpool::{BasicPool, RevalidationType}; @@ -58,3 +58,4 @@ type ReadyIteratorFor = /// It can be used by other components for logging functionality strictly related to txpool (e.g. /// importing transaction). pub const LOG_TARGET: &str = "txpool"; +const LOG_TARGET_STAT: &str = "txpoolstats"; diff --git a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs index d0efc30156a6e..bf427c3d3a450 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/revalidation.rs @@ -18,7 +18,9 @@ //! Pool periodic revalidation. 
-use crate::graph::{BlockHash, ChainApi, ExtrinsicHash, ValidatedTransaction}; +use crate::graph::{ + BlockHash, ChainApi, ExtrinsicHash, ValidateTransactionPriority, ValidatedTransaction, +}; use futures::prelude::*; use indexmap::IndexMap; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -99,8 +101,13 @@ async fn batch_revalidate( let validation_results = futures::future::join_all(batch.into_iter().filter_map(|ext_hash| { pool.validated_pool().ready_by_hash(&ext_hash).map(|ext| { - api.validate_transaction(at, ext.source.clone().into(), ext.data.clone()) - .map(move |validation_result| (validation_result, ext_hash, ext)) + api.validate_transaction( + at, + ext.source.clone().into(), + ext.data.clone(), + ValidateTransactionPriority::Submitted, + ) + .map(move |validation_result| (validation_result, ext_hash, ext)) }) })) .await; @@ -458,14 +465,14 @@ mod tests { let unknown_block = H256::repeat_byte(0x13); let source = TimedTransactionSource::new_external(false); - let uxt_hashes = - block_on(pool.submit_at( - &han_of_block0, - vec![(source.clone(), uxt0.into()), (source, uxt1.into())], - )) - .into_iter() - .map(|r| r.expect("Should be valid").hash()) - .collect::>(); + let uxt_hashes = block_on(pool.submit_at( + &han_of_block0, + vec![(source.clone(), uxt0.into()), (source, uxt1.into())], + ValidateTransactionPriority::Submitted, + )) + .into_iter() + .map(|r| r.expect("Should be valid").hash()) + .collect::>(); assert_eq!(api.validation_requests().len(), 2); assert_eq!(pool.validated_pool().status().ready, 2); diff --git a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs index b7a011a9c5b16..cefdaec05e558 100644 --- a/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs +++ b/substrate/client/transaction-pool/src/single_state_txpool/single_state_txpool.rs @@ -33,7 +33,7 @@ 
use crate::{ self, base_pool::TimedTransactionSource, EventHandler, ExtrinsicHash, IsValidator, RawExtrinsicFor, }, - ReadyIteratorFor, LOG_TARGET, + ReadyIteratorFor, ValidateTransactionPriority, LOG_TARGET, }; use async_trait::async_trait; use futures::{channel::oneshot, future, prelude::*, Future, FutureExt}; @@ -286,7 +286,7 @@ where let number = self.api.resolve_block_number(at); let at = HashAndNumber { hash: at, number: number? }; Ok(pool - .submit_at(&at, xts) + .submit_at(&at, xts, ValidateTransactionPriority::Submitted) .await .into_iter() .map(|result| result.map(|outcome| outcome.hash())) @@ -742,7 +742,12 @@ where }); } - pool.resubmit_at(&hash_and_number, resubmit_transactions).await; + pool.resubmit_at( + &hash_and_number, + resubmit_transactions, + ValidateTransactionPriority::Submitted, + ) + .await; } let extra_pool = pool.clone(); diff --git a/substrate/client/transaction-pool/tests/fatp.rs b/substrate/client/transaction-pool/tests/fatp.rs index c1411b29dafb9..f0e12dfb6a749 100644 --- a/substrate/client/transaction-pool/tests/fatp.rs +++ b/substrate/client/transaction-pool/tests/fatp.rs @@ -1744,14 +1744,14 @@ fn fatp_transactions_purging_stale_on_finalization_works() { assert_eq!(api.validation_requests().len(), 3); assert_eq!(pool.status_all()[&header01.hash()].ready, 3); - assert_eq!(pool.mempool_len(), (1, 2)); + assert_eq!(block_on(pool.mempool_len()), (1, 2)); let header02 = api.push_block(2, vec![xt1.clone(), xt2.clone(), xt3.clone()], true); api.set_nonce(header02.hash(), Alice.into(), 203); block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header02.hash()))); assert_eq!(pool.status_all()[&header02.hash()].ready, 0); - assert_eq!(pool.mempool_len(), (0, 0)); + assert_eq!(block_on(pool.mempool_len()), (0, 0)); let xt1_events = futures::executor::block_on_stream(watcher1).collect::>(); let xt2_events = futures::executor::block_on_stream(watcher2).collect::>(); @@ -1904,7 +1904,7 @@ fn fatp_avoid_stuck_transaction() { let 
xt4i_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4i.clone())).unwrap(); - assert_eq!(pool.mempool_len(), (0, 1)); + assert_eq!(block_on(pool.mempool_len()), (0, 1)); let header01 = api.push_block(1, vec![xt0], true); api.set_nonce(header01.hash(), Alice.into(), 201); @@ -1941,7 +1941,7 @@ fn fatp_avoid_stuck_transaction() { let xt4i_events = futures::executor::block_on_stream(xt4i_watcher).collect::>(); debug!(target: LOG_TARGET, ?xt4i_events, "xt4i_events"); assert_eq!(xt4i_events, vec![TransactionStatus::Future, TransactionStatus::Dropped]); - assert_eq!(pool.mempool_len(), (0, 0)); + assert_eq!(block_on(pool.mempool_len()), (0, 0)); } #[test] @@ -1960,7 +1960,7 @@ fn fatp_future_is_pruned_by_conflicting_tags() { debug!(target: LOG_TARGET, xt2i = ?api.hash_and_length(&xt2i).0, "xt2i"); let _ = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2i.clone())).unwrap(); - assert_eq!(pool.mempool_len(), (0, 1)); + assert_eq!(block_on(pool.mempool_len()), (0, 1)); let header01 = api.push_block(1, vec![], true); let event = new_best_block_event(&pool, None, header01.hash()); @@ -2110,7 +2110,7 @@ fn fatp_dangling_ready_gets_revalidated() { // send xt2 - it will become ready on block 02a. 
let _ = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap(); assert_pool_status!(header02a.hash(), &pool, 1, 0); - assert_eq!(pool.mempool_len(), (0, 1)); + assert_eq!(block_on(pool.mempool_len()), (0, 1)); //xt2 is still ready: view was just cloned (revalidation executed in background) let header02b = api.push_block_with_parent(header01.hash(), vec![], true); diff --git a/substrate/client/transaction-pool/tests/fatp_common/mod.rs b/substrate/client/transaction-pool/tests/fatp_common/mod.rs index 94fa1639d5649..f41c009a46712 100644 --- a/substrate/client/transaction-pool/tests/fatp_common/mod.rs +++ b/substrate/client/transaction-pool/tests/fatp_common/mod.rs @@ -146,7 +146,7 @@ impl TestPoolBuilder { .map(|blocks| blocks[0].0.header.hash()) .expect("there is block 0. qed"); - let (pool, txpool_task) = if self.use_default_limits { + let (pool, [txpool_task, blocking_task]) = if self.use_default_limits { ForkAwareTxPool::new_test( api.clone(), genesis_hash, @@ -167,6 +167,7 @@ impl TestPoolBuilder { let thread_pool = futures::executor::ThreadPool::new().unwrap(); thread_pool.spawn_ok(txpool_task); + thread_pool.spawn_ok(blocking_task); (pool, api, thread_pool) } diff --git a/substrate/client/transaction-pool/tests/fatp_invalid.rs b/substrate/client/transaction-pool/tests/fatp_invalid.rs index 27033bef09bba..332308fa66e11 100644 --- a/substrate/client/transaction-pool/tests/fatp_invalid.rs +++ b/substrate/client/transaction-pool/tests/fatp_invalid.rs @@ -125,7 +125,7 @@ fn fatp_transactions_purging_invalid_on_finalization_works() { assert_eq!(api.validation_requests().len(), 3); assert_eq!(pool.status_all()[&header01.hash()].ready, 3); - assert_eq!(pool.mempool_len(), (0, 3)); + assert_eq!(block_on(pool.mempool_len()), (0, 3)); let header02 = api.push_block(2, vec![], true); api.add_invalid(&xt1); @@ -142,7 +142,7 @@ fn fatp_transactions_purging_invalid_on_finalization_works() { prev_header = header; } - assert_eq!(pool.mempool_len(), (0, 
0)); + assert_eq!(block_on(pool.mempool_len()), (0, 0)); assert_watcher_stream!(watcher1, [TransactionStatus::Ready, TransactionStatus::Invalid]); assert_watcher_stream!(watcher2, [TransactionStatus::Ready, TransactionStatus::Invalid]); @@ -372,7 +372,7 @@ fn fatp_watcher_invalid_single_revalidation2() { let xt0 = uxt(Alice, 200); let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); - assert_eq!(pool.mempool_len(), (0, 1)); + assert_eq!(block_on(pool.mempool_len()), (0, 1)); api.add_invalid(&xt0); let header01 = api.push_block(1, vec![], true); @@ -382,7 +382,7 @@ fn fatp_watcher_invalid_single_revalidation2() { let xt0_events = futures::executor::block_on_stream(xt0_watcher).collect::>(); debug!(target: LOG_TARGET, ?xt0_events, "xt0_events"); assert_eq!(xt0_events, vec![TransactionStatus::Invalid]); - assert_eq!(pool.mempool_len(), (0, 0)); + assert_eq!(block_on(pool.mempool_len()), (0, 0)); } #[test] @@ -393,7 +393,7 @@ fn fatp_watcher_invalid_single_revalidation3() { let xt0 = uxt(Alice, 150); let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap(); - assert_eq!(pool.mempool_len(), (0, 1)); + assert_eq!(block_on(pool.mempool_len()), (0, 1)); let header01 = api.push_block(1, vec![], true); let event = finalized_block_event(&pool, api.genesis_hash(), header01.hash()); @@ -411,7 +411,7 @@ fn fatp_watcher_invalid_single_revalidation3() { let xt0_events = futures::executor::block_on_stream(xt0_watcher).collect::>(); debug!(target: LOG_TARGET, ?xt0_events, "xt0_events"); assert_eq!(xt0_events, vec![TransactionStatus::Invalid]); - assert_eq!(pool.mempool_len(), (0, 0)); + assert_eq!(block_on(pool.mempool_len()), (0, 0)); } #[test] diff --git a/substrate/client/transaction-pool/tests/fatp_limits.rs b/substrate/client/transaction-pool/tests/fatp_limits.rs index 02d307c7e5d05..0a95c8b0cace5 100644 --- a/substrate/client/transaction-pool/tests/fatp_limits.rs +++ 
b/substrate/client/transaction-pool/tests/fatp_limits.rs @@ -97,20 +97,20 @@ fn fatp_limits_ready_count_works() { assert_ready_iterator!(header01.hash(), pool, [xt1, xt2]); //todo: can we do better? We don't have API to check if event was processed internally. let mut counter = 0; - while pool.mempool_len().0 == 3 { + while block_on(pool.mempool_len()).0 == 3 { sleep(std::time::Duration::from_millis(1)); counter = counter + 1; if counter > 20 { assert!(false, "timeout"); } } - assert_eq!(pool.mempool_len().0, 2); + assert_eq!(block_on(pool.mempool_len()).0, 2); //branch with alice transactions: let header02b = api.push_block(2, vec![xt1.clone(), xt2.clone()], true); let event = new_best_block_event(&pool, Some(header01.hash()), header02b.hash()); block_on(pool.maintain(event)); - assert_eq!(pool.mempool_len().0, 2); + assert_eq!(block_on(pool.mempool_len()).0, 2); assert_pool_status!(header02b.hash(), &pool, 0, 0); assert_ready_iterator!(header02b.hash(), pool, []); @@ -119,7 +119,7 @@ fn fatp_limits_ready_count_works() { api.set_nonce(header02a.hash(), Alice.into(), 201); let event = new_best_block_event(&pool, Some(header02b.hash()), header02a.hash()); block_on(pool.maintain(event)); - assert_eq!(pool.mempool_len().0, 2); + assert_eq!(block_on(pool.mempool_len()).0, 2); // assert_pool_status!(header02a.hash(), &pool, 1, 0); assert_ready_iterator!(header02a.hash(), pool, [xt2]); } @@ -152,7 +152,7 @@ fn fatp_limits_ready_count_works_for_submit_at() { assert!(matches!(results[0].as_ref().unwrap_err().0, TxPoolError::ImmediatelyDropped)); assert!(results[1].as_ref().is_ok()); assert!(results[2].as_ref().is_ok()); - assert_eq!(pool.mempool_len().0, 2); + assert_eq!(block_on(pool.mempool_len()).0, 2); //charlie was not included into view: assert_pool_status!(header01.hash(), &pool, 2, 0); assert_ready_iterator!(header01.hash(), pool, [xt1, xt2]); @@ -186,7 +186,7 @@ fn fatp_limits_ready_count_works_for_submit_and_watch() { assert!(matches!(result2.unwrap_err().0, 
TxPoolError::ImmediatelyDropped)); assert!(result0.is_ok()); assert!(result1.is_ok()); - assert_eq!(pool.mempool_len().1, 2); + assert_eq!(block_on(pool.mempool_len()).1, 2); //charlie was not included into view: assert_pool_status!(header01.hash(), &pool, 2, 0); assert_ready_iterator!(header01.hash(), pool, [xt0, xt1]); @@ -220,7 +220,7 @@ fn fatp_limits_future_count_works() { assert_pool_status!(header01.hash(), &pool, 0, 2); //todo: can we do better? We don't have API to check if event was processed internally. let mut counter = 0; - while pool.mempool_len().0 != 2 { + while block_on(pool.mempool_len()).0 != 2 { sleep(std::time::Duration::from_millis(1)); counter = counter + 1; if counter > 20 { @@ -234,7 +234,7 @@ fn fatp_limits_future_count_works() { block_on(pool.maintain(event)); assert_pool_status!(header02.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().0, 2); + assert_eq!(block_on(pool.mempool_len()).0, 2); } #[test] @@ -428,7 +428,7 @@ fn fatp_limits_watcher_empty_and_full_view_immediately_drops() { assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); assert_pool_status!(header01.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().1, 2); + assert_eq!(block_on(pool.mempool_len()).1, 2); let header02e = api.push_block_with_parent( header01.hash(), @@ -456,7 +456,7 @@ fn fatp_limits_watcher_empty_and_full_view_immediately_drops() { assert_pool_status!(header02e.hash(), &pool, 2, 0); assert_ready_iterator!(header02e.hash(), pool, [xt3, xt4]); - assert_eq!(pool.mempool_len().1, 4); + assert_eq!(block_on(pool.mempool_len()).1, 4); let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); assert_eq!( @@ -510,7 +510,7 @@ fn fatp_limits_watcher_empty_and_full_view_drops_with_event() { assert_eq!(xt0_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); assert_pool_status!(header01.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().1, 2); + 
assert_eq!(block_on(pool.mempool_len()).1, 2); let header02e = api.push_block_with_parent( header01.hash(), @@ -540,7 +540,7 @@ fn fatp_limits_watcher_empty_and_full_view_drops_with_event() { assert_eq!(xt3_status, vec![TransactionStatus::Ready, TransactionStatus::Dropped]); //xt5 got dropped - assert_eq!(pool.mempool_len().1, 4); + assert_eq!(block_on(pool.mempool_len()).1, 4); let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::>(); assert_eq!( @@ -631,7 +631,7 @@ fn fatp_limits_future_size_works() { //todo: can we do better? We don't have API to check if event was processed internally. let mut counter = 0; - while pool.mempool_len().0 == 4 { + while block_on(pool.mempool_len()).0 == 4 { sleep(std::time::Duration::from_millis(1)); counter = counter + 1; if counter > 20 { @@ -639,7 +639,7 @@ fn fatp_limits_future_size_works() { } } assert_pool_status!(header01.hash(), &pool, 0, 3); - assert_eq!(pool.mempool_len().0, 3); + assert_eq!(block_on(pool.mempool_len()).0, 3); } #[test] @@ -672,7 +672,7 @@ fn fatp_limits_watcher_ready_transactions_are_not_droped_when_view_is_dropped() block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); assert_pool_status!(header01.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().1, 2); + assert_eq!(block_on(pool.mempool_len()).1, 2); let header02 = api.push_block_with_parent(header01.hash(), vec![], true); block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); @@ -683,7 +683,7 @@ fn fatp_limits_watcher_ready_transactions_are_not_droped_when_view_is_dropped() block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); assert_pool_status!(header02.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().1, 4); + assert_eq!(block_on(pool.mempool_len()).1, 4); let header03 = api.push_block_with_parent(header02.hash(), vec![], true); block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); @@ 
-694,7 +694,7 @@ fn fatp_limits_watcher_ready_transactions_are_not_droped_when_view_is_dropped() block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); assert_pool_status!(header03.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().1, 6); + assert_eq!(block_on(pool.mempool_len()).1, 6); let header04 = api.push_block_with_parent(header03.hash(), vec![xt4.clone(), xt5.clone()], true); @@ -719,12 +719,12 @@ fn fatp_limits_watcher_ready_transactions_are_not_droped_when_view_is_dropped() //view 01 was dropped assert!(pool.ready_at(header01.hash()).now_or_never().is_none()); - assert_eq!(pool.mempool_len().1, 6); + assert_eq!(block_on(pool.mempool_len()).1, 6); block_on(pool.maintain(finalized_block_event(&pool, header02.hash(), header03.hash()))); //no revalidation has happened yet, all txs are kept - assert_eq!(pool.mempool_len().1, 6); + assert_eq!(block_on(pool.mempool_len()).1, 6); //view 03 is still there assert!(!pool.status_all().contains_key(&header03.hash())); @@ -741,7 +741,7 @@ fn fatp_limits_watcher_ready_transactions_are_not_droped_when_view_is_dropped() } //now revalidation has happened, all txs are dropped - assert_eq!(pool.mempool_len().1, 0); + assert_eq!(block_on(pool.mempool_len()).1, 0); } #[test] @@ -772,7 +772,7 @@ fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() { let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); assert_pool_status!(header01.hash(), &pool, 0, 2); - assert_eq!(pool.mempool_len().1, 2); + assert_eq!(block_on(pool.mempool_len()).1, 2); assert_future_iterator!(header01.hash(), pool, [xt0, xt1]); let header02 = api.push_block_with_parent(header01.hash(), vec![], true); @@ -782,7 +782,7 @@ fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() { let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); assert_pool_status!(header02.hash(), &pool, 0, 2); - 
assert_eq!(pool.mempool_len().1, 4); + assert_eq!(block_on(pool.mempool_len()).1, 4); assert_future_iterator!(header02.hash(), pool, [xt2, xt3]); let header03 = api.push_block_with_parent(header02.hash(), vec![], true); @@ -792,7 +792,7 @@ fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() { let xt5_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); assert_pool_status!(header03.hash(), &pool, 0, 2); - assert_eq!(pool.mempool_len().1, 6); + assert_eq!(block_on(pool.mempool_len()).1, 6); assert_future_iterator!(header03.hash(), pool, [xt4, xt5]); let header04 = api.push_block_with_parent(header03.hash(), vec![], true); @@ -807,14 +807,14 @@ fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() { assert_eq!(pool.inactive_views_count(), 0); //todo: can we do better? We don't have API to check if event was processed internally. let mut counter = 0; - while pool.mempool_len().1 != 2 { + while block_on(pool.mempool_len()).1 != 2 { sleep(std::time::Duration::from_millis(1)); counter = counter + 1; if counter > 20 { - assert!(false, "timeout {}", pool.mempool_len().1); + assert!(false, "timeout {}", block_on(pool.mempool_len()).1); } } - assert_eq!(pool.mempool_len().1, 2); + assert_eq!(block_on(pool.mempool_len()).1, 2); assert_pool_status!(header04.hash(), &pool, 0, 2); assert_eq!(pool.futures().len(), 2); diff --git a/substrate/client/transaction-pool/tests/fatp_prios.rs b/substrate/client/transaction-pool/tests/fatp_prios.rs index e4c8321a3d654..38029d6f5db17 100644 --- a/substrate/client/transaction-pool/tests/fatp_prios.rs +++ b/substrate/client/transaction-pool/tests/fatp_prios.rs @@ -53,7 +53,7 @@ fn fatp_prio_ready_higher_evicts_lower() { info!(target: LOG_TARGET, ?result0, "r0"); info!(target: LOG_TARGET, ?result1, "r1"); - info!(target: LOG_TARGET, len = ?pool.mempool_len(), "len"); + info!(target: LOG_TARGET, len = ?block_on(pool.mempool_len()), "len"); info!(target: 
LOG_TARGET, status = ?pool.status_all()[&header01.hash()], "len"); assert_ready_iterator!(header01.hash(), pool, [xt1]); assert_pool_status!(header01.hash(), &pool, 1, 0); @@ -90,7 +90,7 @@ fn fatp_prio_watcher_ready_higher_evicts_lower() { let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(1).collect::>(); assert_eq!(xt1_status, vec![TransactionStatus::Ready]); - info!(target: LOG_TARGET, len = ?pool.mempool_len(), "len"); + info!(target: LOG_TARGET, len = ?block_on(pool.mempool_len()), "len"); info!(target: LOG_TARGET, pool_status = ?pool.status_all()[&header01.hash()], "len"); assert_ready_iterator!(header01.hash(), pool, [xt1]); assert_pool_status!(header01.hash(), &pool, 1, 0); @@ -133,7 +133,7 @@ fn fatp_prio_watcher_future_higher_evicts_lower() { let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(1).collect::>(); assert_eq!(xt2_status, vec![TransactionStatus::Ready]); - assert_eq!(pool.mempool_len().1, 2); + assert_eq!(block_on(pool.mempool_len()).1, 2); assert_ready_iterator!(header01.hash(), pool, [xt2, xt1]); assert_pool_status!(header01.hash(), &pool, 2, 0); } @@ -288,7 +288,7 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted() { let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); assert_pool_status!(header01.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().1, 2); + assert_eq!(block_on(pool.mempool_len()).1, 2); let header02 = api.push_block_with_parent(header01.hash(), vec![], true); block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); @@ -299,7 +299,7 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted() { block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap(); assert_pool_status!(header02.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().1, 4); + assert_eq!(block_on(pool.mempool_len()).1, 4); let header03 = api.push_block_with_parent(header02.hash(), vec![], true); 
block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); @@ -310,7 +310,7 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted() { block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap(); assert_pool_status!(header03.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().1, 4); + assert_eq!(block_on(pool.mempool_len()).1, 4); assert_watcher_stream!(xt0_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); assert_watcher_stream!(xt1_watcher, [TransactionStatus::Ready, TransactionStatus::Dropped]); @@ -352,7 +352,7 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree() { assert_ready_iterator!(header01.hash(), pool, [xt3, xt0, xt1, xt2]); assert_pool_status!(header01.hash(), &pool, 4, 0); - assert_eq!(pool.mempool_len().1, 4); + assert_eq!(block_on(pool.mempool_len()).1, 4); let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap(); assert_pool_status!(header01.hash(), &pool, 2, 0); @@ -397,7 +397,7 @@ fn fatp_prios_watcher_full_mempool_higher_prio_is_accepted_with_subtree2() { assert_ready_iterator!(header01.hash(), pool, [xt3, xt0, xt1, xt2]); assert_pool_status!(header01.hash(), &pool, 4, 0); - assert_eq!(pool.mempool_len().1, 4); + assert_eq!(block_on(pool.mempool_len()).1, 4); let header02 = api.push_block_with_parent(header01.hash(), vec![], true); block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); @@ -444,13 +444,13 @@ fn fatp_prios_watcher_full_mempool_lower_prio_gets_rejected() { block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap(); assert_pool_status!(header01.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().1, 2); + assert_eq!(block_on(pool.mempool_len()).1, 2); let header02 = api.push_block_with_parent(header01.hash(), vec![], true); block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); 
assert_pool_status!(header02.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().1, 2); + assert_eq!(block_on(pool.mempool_len()).1, 2); assert_ready_iterator!(header01.hash(), pool, [xt0, xt1]); assert_ready_iterator!(header02.hash(), pool, [xt0, xt1]); @@ -535,7 +535,7 @@ fn fatp_prios_submit_local_full_mempool_higher_prio_is_accepted() { pool.submit_local(invalid_hash(), xt1.clone()).unwrap(); assert_pool_status!(header01.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().0, 2); + assert_eq!(block_on(pool.mempool_len()).0, 2); let header02 = api.push_block_with_parent(header01.hash(), vec![], true); block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash()))); @@ -544,7 +544,7 @@ fn fatp_prios_submit_local_full_mempool_higher_prio_is_accepted() { pool.submit_local(invalid_hash(), xt3.clone()).unwrap(); assert_pool_status!(header02.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().0, 4); + assert_eq!(block_on(pool.mempool_len()).0, 4); let header03 = api.push_block_with_parent(header02.hash(), vec![], true); block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash()))); @@ -553,7 +553,7 @@ fn fatp_prios_submit_local_full_mempool_higher_prio_is_accepted() { pool.submit_local(invalid_hash(), xt5.clone()).unwrap(); assert_pool_status!(header03.hash(), &pool, 2, 0); - assert_eq!(pool.mempool_len().0, 4); + assert_eq!(block_on(pool.mempool_len()).0, 4); assert_ready_iterator!(header01.hash(), pool, []); assert_ready_iterator!(header02.hash(), pool, [xt3, xt2]); diff --git a/substrate/test-utils/runtime/transaction-pool/src/lib.rs b/substrate/test-utils/runtime/transaction-pool/src/lib.rs index f88694fb1071e..01af801fae942 100644 --- a/substrate/test-utils/runtime/transaction-pool/src/lib.rs +++ b/substrate/test-utils/runtime/transaction-pool/src/lib.rs @@ -22,7 +22,7 @@ use codec::Encode; use futures::future::ready; use parking_lot::RwLock; -use sc_transaction_pool::ChainApi; +use 
sc_transaction_pool::{ChainApi, ValidateTransactionPriority}; use sp_blockchain::{CachedHeaderMetadata, HashAndNumber, TreeRoute}; use sp_runtime::{ generic::{self, BlockId}, @@ -354,6 +354,7 @@ impl ChainApi for TestApi { at: ::Hash, source: TransactionSource, uxt: Arc<::Extrinsic>, + _: ValidateTransactionPriority, ) -> Self::ValidationFuture { ready(self.validate_transaction_blocking(at, source, uxt)) }