diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3c50b2a0041..2658ff454e9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -62,52 +62,35 @@ jobs: - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == '1.75.0'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" - - name: Run CI script - shell: bash # Default on Winblows is powershell - run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tests.sh - - build-tx-sync: - strategy: - fail-fast: false - matrix: - platform: [ ubuntu-latest, macos-latest ] - toolchain: [ stable, beta, 1.75.0 ] - runs-on: ${{ matrix.platform }} - steps: - - name: Checkout source code - uses: actions/checkout@v4 - - name: Install Rust ${{ matrix.toolchain }} toolchain - run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ matrix.toolchain }} - - name: Set RUSTFLAGS to deny warnings - if: "matrix.toolchain == '1.75.0'" - run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" - name: Enable caching for bitcoind + if: matrix.platform != 'windows-latest' id: cache-bitcoind uses: actions/cache@v4 with: path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }} key: bitcoind-${{ runner.os }}-${{ runner.arch }} - name: Enable caching for electrs + if: matrix.platform != 'windows-latest' id: cache-electrs uses: actions/cache@v4 with: path: bin/electrs-${{ runner.os }}-${{ runner.arch }} key: electrs-${{ runner.os }}-${{ runner.arch }} - name: Download bitcoind/electrs - if: "steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true'" + if: "matrix.platform != 'windows-latest' && (steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" run: | source ./contrib/download_bitcoind_electrs.sh mkdir bin mv "$BITCOIND_EXE" bin/bitcoind-${{ runner.os }}-${{ runner.arch }} mv "$ELECTRS_EXE" bin/electrs-${{ runner.os }}-${{ runner.arch }} - name: Set 
bitcoind/electrs environment variables + if: matrix.platform != 'windows-latest' run: | echo "BITCOIND_EXE=$( pwd )/bin/bitcoind-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" echo "ELECTRS_EXE=$( pwd )/bin/electrs-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" - name: Run CI script shell: bash # Default on Winblows is powershell - run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tx-sync-tests.sh + run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tests.sh coverage: needs: fuzz diff --git a/Cargo.toml b/Cargo.toml index f9f7406339e..a0895fe1641 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,11 +16,11 @@ members = [ "lightning-macros", "lightning-dns-resolver", "lightning-liquidity", + "lightning-transaction-sync", "possiblyrandom", ] exclude = [ - "lightning-transaction-sync", "lightning-tests", "ext-functional-test-demo", "no-std-check", diff --git a/ci/check-lint.sh b/ci/check-lint.sh index 39c10692310..c1f1b08a1e1 100755 --- a/ci/check-lint.sh +++ b/ci/check-lint.sh @@ -107,7 +107,8 @@ CLIPPY() { -A clippy::useless_conversion \ -A clippy::manual_repeat_n `# to be removed once we hit MSRV 1.86` \ -A clippy::manual_is_multiple_of `# to be removed once we hit MSRV 1.87` \ - -A clippy::uninlined-format-args + -A clippy::uninlined-format-args \ + -A clippy::manual-async-fn # Not really sure why this is even a warning when there's a Send bound } CLIPPY diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index 91ead9903cb..488c5ac4826 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -2,7 +2,6 @@ #shellcheck disable=SC2002,SC2207 set -eox pipefail -# Currently unused as we don't have to pin anything for MSRV: RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }') # Some crates require pinning to meet our MSRV even for our downstream users, @@ -20,6 +19,9 @@ PIN_RELEASE_DEPS # pin the release dependencies in our main workspace # proptest 1.9.0 requires rustc 1.82.0 [ "$RUSTC_MINOR_VERSION" -lt 82 ] && cargo update -p proptest --precise 
"1.8.0" --verbose +# Starting with version 1.2.0, the `idna_adapter` crate has an MSRV of rustc 1.81.0. +[ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p idna_adapter --precise "1.1.0" --verbose + export RUST_BACKTRACE=1 echo -e "\n\nChecking the workspace, except lightning-transaction-sync." @@ -57,6 +59,23 @@ cargo check -p lightning-block-sync --verbose --color always --features rpc-clie cargo test -p lightning-block-sync --verbose --color always --features rpc-client,rest-client,tokio cargo check -p lightning-block-sync --verbose --color always --features rpc-client,rest-client,tokio +echo -e "\n\nChecking Transaction Sync Clients with features." +cargo check -p lightning-transaction-sync --verbose --color always --features esplora-blocking +cargo check -p lightning-transaction-sync --verbose --color always --features esplora-async +cargo check -p lightning-transaction-sync --verbose --color always --features esplora-async-https +cargo check -p lightning-transaction-sync --verbose --color always --features electrum + +if [ -z "$CI_ENV" ] && [[ -z "$BITCOIND_EXE" || -z "$ELECTRS_EXE" ]]; then + echo -e "\n\nSkipping testing Transaction Sync Clients due to BITCOIND_EXE or ELECTRS_EXE being unset." + cargo check -p lightning-transaction-sync --tests +else + echo -e "\n\nTesting Transaction Sync Clients with features." 
+ cargo test -p lightning-transaction-sync --verbose --color always --features esplora-blocking + cargo test -p lightning-transaction-sync --verbose --color always --features esplora-async + cargo test -p lightning-transaction-sync --verbose --color always --features esplora-async-https + cargo test -p lightning-transaction-sync --verbose --color always --features electrum +fi + echo -e "\n\nChecking and testing lightning-persister with features" cargo test -p lightning-persister --verbose --color always --features tokio cargo check -p lightning-persister --verbose --color always --features tokio diff --git a/ci/ci-tx-sync-tests.sh b/ci/ci-tx-sync-tests.sh deleted file mode 100755 index 0839e2ced3d..00000000000 --- a/ci/ci-tx-sync-tests.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -set -eox pipefail - -RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }') - -pushd lightning-transaction-sync - -# Some crates require pinning to meet our MSRV even for our downstream users, -# which we do here. -# Further crates which appear only as dev-dependencies are pinned further down. -function PIN_RELEASE_DEPS { - return 0 # Don't fail the script if our rustc is higher than the last check -} - -PIN_RELEASE_DEPS # pin the release dependencies - -# Starting with version 1.2.0, the `idna_adapter` crate has an MSRV of rustc 1.81.0. -[ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p idna_adapter --precise "1.1.0" --verbose - -export RUST_BACKTRACE=1 - -echo -e "\n\nChecking Transaction Sync Clients with features." -cargo check --verbose --color always --features esplora-blocking -cargo check --verbose --color always --features esplora-async -cargo check --verbose --color always --features esplora-async-https -cargo check --verbose --color always --features electrum - -if [ -z "$CI_ENV" ] && [[ -z "$BITCOIND_EXE" || -z "$ELECTRS_EXE" ]]; then - echo -e "\n\nSkipping testing Transaction Sync Clients due to BITCOIND_EXE or ELECTRS_EXE being unset." 
- cargo check --tests -else - echo -e "\n\nTesting Transaction Sync Clients with features." - cargo test --verbose --color always --features esplora-blocking - cargo test --verbose --color always --features esplora-async - cargo test --verbose --color always --features esplora-async-https - cargo test --verbose --color always --features electrum -fi - -popd diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 19333c5823a..d9e1f2ce251 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -41,6 +41,8 @@ use lightning::events::ReplayEvent; use lightning::events::{Event, PathFailure}; use lightning::util::ser::Writeable; +#[cfg(not(c_bindings))] +use lightning::io::Error; use lightning::ln::channelmanager::AChannelManager; use lightning::ln::msgs::OnionMessageHandler; use lightning::ln::peer_handler::APeerManager; @@ -59,6 +61,8 @@ use lightning::util::persist::{ NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY, SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, }; +#[cfg(not(c_bindings))] +use lightning::util::native_async::MaybeSend; use lightning::util::sweep::{OutputSweeper, OutputSweeperSync}; #[cfg(feature = "std")] use lightning::util::wakers::Sleeper; @@ -83,7 +87,11 @@ use std::time::Instant; #[cfg(not(feature = "std"))] use alloc::boxed::Box; #[cfg(all(not(c_bindings), not(feature = "std")))] +use alloc::string::String; +#[cfg(all(not(c_bindings), not(feature = "std")))] use alloc::sync::Arc; +#[cfg(all(not(c_bindings), not(feature = "std")))] +use alloc::vec::Vec; /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep /// Rust-Lightning running properly, and (2) either can or should be run in the background. 
Its @@ -416,6 +424,37 @@ pub const NO_ONION_MESSENGER: Option< >, > = None; +#[cfg(not(c_bindings))] +/// A panicking implementation of [`KVStore`] that is used in [`NO_LIQUIDITY_MANAGER`]. +pub struct DummyKVStore; + +#[cfg(not(c_bindings))] +impl KVStore for DummyKVStore { + fn read( + &self, _: &str, _: &str, _: &str, + ) -> impl core::future::Future, Error>> + MaybeSend + 'static { + async { unimplemented!() } + } + + fn write( + &self, _: &str, _: &str, _: &str, _: Vec, + ) -> impl core::future::Future> + MaybeSend + 'static { + async { unimplemented!() } + } + + fn remove( + &self, _: &str, _: &str, _: &str, _: bool, + ) -> impl core::future::Future> + MaybeSend + 'static { + async { unimplemented!() } + } + + fn list( + &self, _: &str, _: &str, + ) -> impl core::future::Future, Error>> + MaybeSend + 'static { + async { unimplemented!() } + } +} + /// When initializing a background processor without a liquidity manager, this can be used to avoid /// specifying a concrete `LiquidityManager` type. 
#[cfg(not(c_bindings))] @@ -430,8 +469,8 @@ pub const NO_LIQUIDITY_MANAGER: Option< CM = &DynChannelManager, Filter = dyn chain::Filter + Send + Sync, C = &(dyn chain::Filter + Send + Sync), - KVStore = dyn lightning::util::persist::KVStore + Send + Sync, - K = &(dyn lightning::util::persist::KVStore + Send + Sync), + KVStore = DummyKVStore, + K = &DummyKVStore, TimeProvider = dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync, TP = &(dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync), BroadcasterInterface = dyn lightning::chain::chaininterface::BroadcasterInterface diff --git a/lightning-block-sync/src/async_poll.rs b/lightning-block-sync/src/async_poll.rs new file mode 120000 index 00000000000..eb85cdac697 --- /dev/null +++ b/lightning-block-sync/src/async_poll.rs @@ -0,0 +1 @@ +../../lightning/src/util/async_poll.rs \ No newline at end of file diff --git a/lightning-block-sync/src/gossip.rs b/lightning-block-sync/src/gossip.rs index 0fe221b9231..004914daac9 100644 --- a/lightning-block-sync/src/gossip.rs +++ b/lightning-block-sync/src/gossip.rs @@ -2,7 +2,7 @@ //! current UTXO set. This module defines an implementation of the LDK API required to do so //! against a [`BlockSource`] which implements a few additional methods for accessing the UTXO set. -use crate::{AsyncBlockSourceResult, BlockData, BlockSource, BlockSourceError}; +use crate::{BlockData, BlockSource, BlockSourceError, BlockSourceResult}; use bitcoin::block::Block; use bitcoin::constants::ChainHash; @@ -18,7 +18,7 @@ use lightning::util::native_async::FutureSpawner; use std::collections::VecDeque; use std::future::Future; use std::ops::Deref; -use std::pin::Pin; +use std::pin::{pin, Pin}; use std::sync::{Arc, Mutex}; use std::task::Poll; @@ -35,11 +35,13 @@ pub trait UtxoSource: BlockSource + 'static { /// for gossip validation. 
fn get_block_hash_by_height<'a>( &'a self, block_height: u32, - ) -> AsyncBlockSourceResult<'a, BlockHash>; + ) -> impl Future> + Send + 'a; /// Returns true if the given output has *not* been spent, i.e. is a member of the current UTXO /// set. - fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool>; + fn is_output_unspent<'a>( + &'a self, outpoint: OutPoint, + ) -> impl Future> + Send + 'a; } #[cfg(feature = "tokio")] @@ -47,42 +49,49 @@ pub trait UtxoSource: BlockSource + 'static { pub struct TokioSpawner; #[cfg(feature = "tokio")] impl FutureSpawner for TokioSpawner { - fn spawn + Send + 'static>(&self, future: T) { - tokio::spawn(future); + type E = tokio::task::JoinError; + type SpawnedFutureResult = tokio::task::JoinHandle; + fn spawn + Send + 'static>( + &self, future: F, + ) -> Self::SpawnedFutureResult { + tokio::spawn(future) } } /// A trivial future which joins two other futures and polls them at the same time, returning only /// once both complete. 
pub(crate) struct Joiner< - A: Future), BlockSourceError>> + Unpin, - B: Future> + Unpin, + 'a, + A: Future), BlockSourceError>>, + B: Future>, > { - pub a: A, - pub b: B, + pub a: Pin<&'a mut A>, + pub b: Pin<&'a mut B>, a_res: Option<(BlockHash, Option)>, b_res: Option, } impl< - A: Future), BlockSourceError>> + Unpin, - B: Future> + Unpin, - > Joiner + 'a, + A: Future), BlockSourceError>>, + B: Future>, + > Joiner<'a, A, B> { - fn new(a: A, b: B) -> Self { + fn new(a: Pin<&'a mut A>, b: Pin<&'a mut B>) -> Self { Self { a, b, a_res: None, b_res: None } } } impl< - A: Future), BlockSourceError>> + Unpin, - B: Future> + Unpin, - > Future for Joiner + 'a, + A: Future), BlockSourceError>>, + B: Future>, + > Future for Joiner<'a, A, B> { type Output = Result<((BlockHash, Option), BlockHash), BlockSourceError>; fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll { if self.a_res.is_none() { - match Pin::new(&mut self.a).poll(ctx) { + match self.a.as_mut().poll(ctx) { Poll::Ready(res) => { if let Ok(ok) = res { self.a_res = Some(ok); @@ -94,7 +103,7 @@ impl< } } if self.b_res.is_none() { - match Pin::new(&mut self.b).poll(ctx) { + match self.b.as_mut().poll(ctx) { Poll::Ready(res) => { if let Ok(ok) = res { self.b_res = Some(ok); @@ -200,10 +209,12 @@ where } } - let ((_, tip_height_opt), block_hash) = - Joiner::new(source.get_best_block(), source.get_block_hash_by_height(block_height)) - .await - .map_err(|_| UtxoLookupError::UnknownTx)?; + let ((_, tip_height_opt), block_hash) = Joiner::new( + pin!(source.get_best_block()), + pin!(source.get_block_hash_by_height(block_height)), + ) + .await + .map_err(|_| UtxoLookupError::UnknownTx)?; if let Some(tip_height) = tip_height_opt { // If the block doesn't yet have five confirmations, error out. 
// @@ -273,7 +284,7 @@ where let gossiper = Arc::clone(&self.gossiper); let block_cache = Arc::clone(&self.block_cache); let pmw = Arc::clone(&self.peer_manager_wake); - self.spawn.spawn(async move { + let _ = self.spawn.spawn(async move { let res = Self::retrieve_utxo(source, block_cache, short_channel_id).await; fut.resolve(gossiper.network_graph(), &*gossiper, res); (pmw)(); diff --git a/lightning-block-sync/src/init.rs b/lightning-block-sync/src/init.rs index a870f8ca88c..52eea92e244 100644 --- a/lightning-block-sync/src/init.rs +++ b/lightning-block-sync/src/init.rs @@ -1,11 +1,11 @@ //! Utilities to assist in the initial sync required to initialize or reload Rust-Lightning objects //! from disk. -use crate::poll::{ChainPoller, Validate, ValidatedBlockHeader}; -use crate::{BlockSource, BlockSourceResult, Cache, ChainNotifier}; +use crate::async_poll::{MultiResultFuturePoller, ResultFuture}; +use crate::poll::{ChainPoller, Poll, Validate, ValidatedBlockHeader}; +use crate::{BlockData, BlockSource, BlockSourceResult, ChainNotifier, HeaderCache}; use bitcoin::block::Header; -use bitcoin::hash_types::BlockHash; use bitcoin::network::Network; use lightning::chain; @@ -32,19 +32,18 @@ where /// Performs a one-time sync of chain listeners using a single *trusted* block source, bringing each /// listener's view of the chain from its paired block hash to `block_source`'s best chain tip. /// -/// Upon success, the returned header can be used to initialize [`SpvClient`]. In the case of -/// failure, each listener may be left at a different block hash than the one it was originally -/// paired with. +/// Upon success, the returned header and header cache can be used to initialize [`SpvClient`]. In +/// the case of failure, each listener may be left at a different block hash than the one it was +/// originally paired with. /// /// Useful during startup to bring the [`ChannelManager`] and each [`ChannelMonitor`] in sync before /// switching to [`SpvClient`]. 
For example: /// /// ``` -/// use bitcoin::hash_types::BlockHash; /// use bitcoin::network::Network; /// /// use lightning::chain; -/// use lightning::chain::Watch; +/// use lightning::chain::{BestBlock, Watch}; /// use lightning::chain::chainmonitor; /// use lightning::chain::chainmonitor::ChainMonitor; /// use lightning::chain::channelmonitor::ChannelMonitor; @@ -89,14 +88,14 @@ where /// logger: &L, /// persister: &P, /// ) { -/// // Read a serialized channel monitor paired with the block hash when it was persisted. +/// // Read a serialized channel monitor paired with the best block when it was persisted. /// let serialized_monitor = "..."; -/// let (monitor_block_hash, mut monitor) = <(BlockHash, ChannelMonitor)>::read( +/// let (monitor_best_block, mut monitor) = <(BestBlock, ChannelMonitor)>::read( /// &mut Cursor::new(&serialized_monitor), (entropy_source, signer_provider)).unwrap(); /// -/// // Read the channel manager paired with the block hash when it was persisted. +/// // Read the channel manager paired with the best block when it was persisted. /// let serialized_manager = "..."; -/// let (manager_block_hash, mut manager) = { +/// let (manager_best_block, mut manager) = { /// let read_args = ChannelManagerReadArgs::new( /// entropy_source, /// node_signer, @@ -110,19 +109,18 @@ where /// config, /// vec![&mut monitor], /// ); -/// <(BlockHash, ChannelManager<&ChainMonitor, &T, &ES, &NS, &SP, &F, &R, &MR, &L>)>::read( +/// <(BestBlock, ChannelManager<&ChainMonitor, &T, &ES, &NS, &SP, &F, &R, &MR, &L>)>::read( /// &mut Cursor::new(&serialized_manager), read_args).unwrap() /// }; /// /// // Synchronize any channel monitors and the channel manager to be on the best block. 
-/// let mut cache = UnboundedCache::new(); /// let mut monitor_listener = (monitor, &*tx_broadcaster, &*fee_estimator, &*logger); /// let listeners = vec![ -/// (monitor_block_hash, &monitor_listener as &dyn chain::Listen), -/// (manager_block_hash, &manager as &dyn chain::Listen), +/// (monitor_best_block, &monitor_listener as &dyn chain::Listen), +/// (manager_best_block, &manager as &dyn chain::Listen), /// ]; -/// let chain_tip = init::synchronize_listeners( -/// block_source, Network::Bitcoin, &mut cache, listeners).await.unwrap(); +/// let (chain_cache, chain_tip) = init::synchronize_listeners( +/// block_source, Network::Bitcoin, listeners).await.unwrap(); /// /// // Allow the chain monitor to watch any channels. /// let monitor = monitor_listener.0; @@ -131,7 +129,7 @@ where /// // Create an SPV client to notify the chain monitor and channel manager of block events. /// let chain_poller = poll::ChainPoller::new(block_source, Network::Bitcoin); /// let mut chain_listener = (chain_monitor, &manager); -/// let spv_client = SpvClient::new(chain_tip, chain_poller, &mut cache, &chain_listener); +/// let spv_client = SpvClient::new(chain_tip, chain_poller, chain_cache, &chain_listener); /// } /// ``` /// @@ -140,85 +138,87 @@ where /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor pub async fn synchronize_listeners< B: Deref + Sized + Send + Sync, - C: Cache, L: chain::Listen + ?Sized, >( - block_source: B, network: Network, header_cache: &mut C, - mut chain_listeners: Vec<(BlockHash, &L)>, -) -> BlockSourceResult + block_source: B, network: Network, + mut chain_listeners: Vec<(BestBlock, &L)>, +) -> BlockSourceResult<(HeaderCache, ValidatedBlockHeader)> where B::Target: BlockSource, { let best_header = validate_best_block_header(&*block_source).await?; - // Fetch the header for the block hash paired with each listener. 
- let mut chain_listeners_with_old_headers = Vec::new(); - for (old_block_hash, chain_listener) in chain_listeners.drain(..) { - let old_header = match header_cache.look_up(&old_block_hash) { - Some(header) => *header, - None => { - block_source.get_header(&old_block_hash, None).await?.validate(old_block_hash)? - }, - }; - chain_listeners_with_old_headers.push((old_header, chain_listener)) - } - // Find differences and disconnect blocks for each listener individually. let mut chain_poller = ChainPoller::new(block_source, network); let mut chain_listeners_at_height = Vec::new(); - let mut most_common_ancestor = None; let mut most_connected_blocks = Vec::new(); - for (old_header, chain_listener) in chain_listeners_with_old_headers.drain(..) { + let mut header_cache = HeaderCache::new(); + for (old_best_block, chain_listener) in chain_listeners.drain(..) { // Disconnect any stale blocks, but keep them in the cache for the next iteration. - let header_cache = &mut ReadOnlyCache(header_cache); let (common_ancestor, connected_blocks) = { let chain_listener = &DynamicChainListener(chain_listener); - let mut chain_notifier = ChainNotifier { header_cache, chain_listener }; + let mut chain_notifier = ChainNotifier { header_cache: &mut header_cache, chain_listener }; let difference = - chain_notifier.find_difference(best_header, &old_header, &mut chain_poller).await?; - chain_notifier.disconnect_blocks(difference.disconnected_blocks); + chain_notifier.find_difference_from_best_block(best_header, old_best_block, &mut chain_poller).await?; + if difference.common_ancestor.block_hash != old_best_block.block_hash { + chain_notifier.disconnect_blocks(difference.common_ancestor); + } (difference.common_ancestor, difference.connected_blocks) }; // Keep track of the most common ancestor and all blocks connected across all listeners. 
chain_listeners_at_height.push((common_ancestor.height, chain_listener)); if connected_blocks.len() > most_connected_blocks.len() { - most_common_ancestor = Some(common_ancestor); most_connected_blocks = connected_blocks; } } - // Connect new blocks for all listeners at once to avoid re-fetching blocks. - if let Some(common_ancestor) = most_common_ancestor { - let chain_listener = &ChainListenerSet(chain_listeners_at_height); - let mut chain_notifier = ChainNotifier { header_cache, chain_listener }; - chain_notifier - .connect_blocks(common_ancestor, most_connected_blocks, &mut chain_poller) - .await - .map_err(|(e, _)| e)?; - } - - Ok(best_header) -} - -/// A wrapper to make a cache read-only. -/// -/// Used to prevent losing headers that may be needed to disconnect blocks common to more than one -/// listener. -struct ReadOnlyCache<'a, C: Cache>(&'a mut C); + while !most_connected_blocks.is_empty() { + #[cfg(not(test))] + const MAX_BLOCKS_AT_ONCE: usize = 6 * 6; // Six hours of blocks, 144MiB encoded + #[cfg(test)] + const MAX_BLOCKS_AT_ONCE: usize = 2; + + let mut fetch_block_futures = + Vec::with_capacity(core::cmp::min(MAX_BLOCKS_AT_ONCE, most_connected_blocks.len())); + for header in most_connected_blocks.iter().rev().take(MAX_BLOCKS_AT_ONCE) { + let fetch_future = chain_poller.fetch_block(header); + fetch_block_futures.push(ResultFuture::Pending(Box::pin(async move { + (header, fetch_future.await) + }))); + } + let results = MultiResultFuturePoller::new(fetch_block_futures).await.into_iter(); -impl<'a, C: Cache> Cache for ReadOnlyCache<'a, C> { - fn look_up(&self, block_hash: &BlockHash) -> Option<&ValidatedBlockHeader> { - self.0.look_up(block_hash) - } + let mut fetched_blocks = [const { None }; MAX_BLOCKS_AT_ONCE]; + for ((header, block_res), result) in results.into_iter().zip(fetched_blocks.iter_mut()) { + *result = Some((header.height, block_res?)); + } + debug_assert!(fetched_blocks.iter().take(most_connected_blocks.len()).all(|r| r.is_some())); + 
debug_assert!(fetched_blocks.is_sorted_by_key(|r| r.as_ref().map(|(height, _)| *height).unwrap_or(u32::MAX))); + + for (listener_height, listener) in chain_listeners_at_height.iter() { + // Connect blocks for this listener. + for result in fetched_blocks.iter() { + if let Some((height, block_data)) = result { + if *height > *listener_height { + match &**block_data { + BlockData::FullBlock(block) => { + listener.block_connected(&block, *height); + }, + BlockData::HeaderOnly(header_data) => { + listener.filtered_block_connected(&header_data, &[], *height); + }, + } + } + } + } + } - fn block_connected(&mut self, _block_hash: BlockHash, _block_header: ValidatedBlockHeader) { - unreachable!() + most_connected_blocks + .truncate(most_connected_blocks.len().saturating_sub(MAX_BLOCKS_AT_ONCE)); } - fn block_disconnected(&mut self, _block_hash: &BlockHash) -> Option { - None - } + Ok((header_cache, best_header)) } /// Wrapper for supporting dynamically sized chain listeners. @@ -236,33 +236,6 @@ impl<'a, L: chain::Listen + ?Sized> chain::Listen for DynamicChainListener<'a, L } } -/// A set of dynamically sized chain listeners, each paired with a starting block height. 
-struct ChainListenerSet<'a, L: chain::Listen + ?Sized>(Vec<(u32, &'a L)>); - -impl<'a, L: chain::Listen + ?Sized> chain::Listen for ChainListenerSet<'a, L> { - fn block_connected(&self, block: &bitcoin::Block, height: u32) { - for (starting_height, chain_listener) in self.0.iter() { - if height > *starting_height { - chain_listener.block_connected(block, height); - } - } - } - - fn filtered_block_connected( - &self, header: &Header, txdata: &chain::transaction::TransactionData, height: u32, - ) { - for (starting_height, chain_listener) in self.0.iter() { - if height > *starting_height { - chain_listener.filtered_block_connected(header, txdata, height); - } - } - } - - fn blocks_disconnected(&self, _fork_point: BestBlock) { - unreachable!() - } -} - #[cfg(test)] mod tests { use super::*; @@ -282,13 +255,12 @@ mod tests { let listener_3 = MockChainListener::new().expect_block_connected(*chain.at_height(4)); let listeners = vec![ - (chain.at_height(1).block_hash, &listener_1 as &dyn chain::Listen), - (chain.at_height(2).block_hash, &listener_2 as &dyn chain::Listen), - (chain.at_height(3).block_hash, &listener_3 as &dyn chain::Listen), + (chain.best_block_at_height(1), &listener_1 as &dyn chain::Listen), + (chain.best_block_at_height(2), &listener_2 as &dyn chain::Listen), + (chain.best_block_at_height(3), &listener_3 as &dyn chain::Listen), ]; - let mut cache = chain.header_cache(0..=4); - match synchronize_listeners(&chain, Network::Bitcoin, &mut cache, listeners).await { - Ok(header) => assert_eq!(header, chain.tip()), + match synchronize_listeners(&chain, Network::Bitcoin, listeners).await { + Ok((_, header)) => assert_eq!(header, chain.tip()), Err(e) => panic!("Unexpected error: {:?}", e), } } @@ -314,15 +286,12 @@ mod tests { .expect_block_connected(*main_chain.at_height(4)); let listeners = vec![ - (fork_chain_1.tip().block_hash, &listener_1 as &dyn chain::Listen), - (fork_chain_2.tip().block_hash, &listener_2 as &dyn chain::Listen), - 
(fork_chain_3.tip().block_hash, &listener_3 as &dyn chain::Listen), + (fork_chain_1.best_block(), &listener_1 as &dyn chain::Listen), + (fork_chain_2.best_block(), &listener_2 as &dyn chain::Listen), + (fork_chain_3.best_block(), &listener_3 as &dyn chain::Listen), ]; - let mut cache = fork_chain_1.header_cache(2..=4); - cache.extend(fork_chain_2.header_cache(3..=4)); - cache.extend(fork_chain_3.header_cache(4..=4)); - match synchronize_listeners(&main_chain, Network::Bitcoin, &mut cache, listeners).await { - Ok(header) => assert_eq!(header, main_chain.tip()), + match synchronize_listeners(&main_chain, Network::Bitcoin, listeners).await { + Ok((_, header)) => assert_eq!(header, main_chain.tip()), Err(e) => panic!("Unexpected error: {:?}", e), } } @@ -351,37 +320,12 @@ mod tests { .expect_block_connected(*main_chain.at_height(4)); let listeners = vec![ - (fork_chain_1.tip().block_hash, &listener_1 as &dyn chain::Listen), - (fork_chain_2.tip().block_hash, &listener_2 as &dyn chain::Listen), - (fork_chain_3.tip().block_hash, &listener_3 as &dyn chain::Listen), + (fork_chain_1.best_block(), &listener_1 as &dyn chain::Listen), + (fork_chain_2.best_block(), &listener_2 as &dyn chain::Listen), + (fork_chain_3.best_block(), &listener_3 as &dyn chain::Listen), ]; - let mut cache = fork_chain_1.header_cache(2..=4); - cache.extend(fork_chain_2.header_cache(3..=4)); - cache.extend(fork_chain_3.header_cache(4..=4)); - match synchronize_listeners(&main_chain, Network::Bitcoin, &mut cache, listeners).await { - Ok(header) => assert_eq!(header, main_chain.tip()), - Err(e) => panic!("Unexpected error: {:?}", e), - } - } - - #[tokio::test] - async fn cache_connected_and_keep_disconnected_blocks() { - let main_chain = Blockchain::default().with_height(2); - let fork_chain = main_chain.fork_at_height(1); - let new_tip = main_chain.tip(); - let old_tip = fork_chain.tip(); - - let listener = MockChainListener::new() - .expect_blocks_disconnected(*fork_chain.at_height(1)) - 
.expect_block_connected(*new_tip); - - let listeners = vec![(old_tip.block_hash, &listener as &dyn chain::Listen)]; - let mut cache = fork_chain.header_cache(2..=2); - match synchronize_listeners(&main_chain, Network::Bitcoin, &mut cache, listeners).await { - Ok(_) => { - assert!(cache.contains_key(&new_tip.block_hash)); - assert!(cache.contains_key(&old_tip.block_hash)); - }, + match synchronize_listeners(&main_chain, Network::Bitcoin, listeners).await { + Ok((_, header)) => assert_eq!(header, main_chain.tip()), Err(e) => panic!("Unexpected error: {:?}", e), } } diff --git a/lightning-block-sync/src/lib.rs b/lightning-block-sync/src/lib.rs index 8656ba6ec6b..ab91d3f396f 100644 --- a/lightning-block-sync/src/lib.rs +++ b/lightning-block-sync/src/lib.rs @@ -16,9 +16,11 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] #![deny(missing_docs)] -#![deny(unsafe_code)] #![cfg_attr(docsrs, feature(doc_cfg))] +extern crate alloc; +extern crate core; + #[cfg(any(feature = "rest-client", feature = "rpc-client"))] pub mod http; @@ -42,6 +44,9 @@ mod test_utils; #[cfg(any(feature = "rest-client", feature = "rpc-client"))] mod utils; +#[allow(unused)] +mod async_poll; + use crate::poll::{ChainTip, Poll, ValidatedBlockHeader}; use bitcoin::block::{Block, Header}; @@ -53,7 +58,6 @@ use lightning::chain::{BestBlock, Listen}; use std::future::Future; use std::ops::Deref; -use std::pin::Pin; /// Abstract type for retrieving block headers and data. pub trait BlockSource: Sync + Send { @@ -65,12 +69,13 @@ pub trait BlockSource: Sync + Send { /// when `height_hint` is `None`. fn get_header<'a>( &'a self, header_hash: &'a BlockHash, height_hint: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData>; + ) -> impl Future> + Send + 'a; /// Returns the block for a given hash. A headers-only block source should return a `Transient` /// error. 
- fn get_block<'a>(&'a self, header_hash: &'a BlockHash) - -> AsyncBlockSourceResult<'a, BlockData>; + fn get_block<'a>( + &'a self, header_hash: &'a BlockHash, + ) -> impl Future> + Send + 'a; /// Returns the hash of the best block and, optionally, its height. /// @@ -78,18 +83,14 @@ pub trait BlockSource: Sync + Send { /// to allow for a more efficient lookup. /// /// [`get_header`]: Self::get_header - fn get_best_block(&self) -> AsyncBlockSourceResult<'_, (BlockHash, Option)>; + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a; } /// Result type for `BlockSource` requests. pub type BlockSourceResult = Result; -// TODO: Replace with BlockSourceResult once `async` trait functions are supported. For details, -// see: https://areweasyncyet.rs. -/// Result type for asynchronous `BlockSource` requests. -pub type AsyncBlockSourceResult<'a, T> = - Pin> + 'a + Send>>; - /// Error type for `BlockSource` requests. /// /// Transient errors may be resolved when re-polling, but no attempt will be made to re-poll on @@ -174,18 +175,13 @@ pub enum BlockData { /// sources for the best chain tip. During this process it detects any chain forks, determines which /// constitutes the best chain, and updates the listener accordingly with any blocks that were /// connected or disconnected since the last poll. -/// -/// Block headers for the best chain are maintained in the parameterized cache, allowing for a -/// custom cache eviction policy. This offers flexibility to those sensitive to resource usage. -/// Hence, there is a trade-off between a lower memory footprint and potentially increased network -/// I/O as headers are re-fetched during fork detection. 
-pub struct SpvClient<'a, P: Poll, C: Cache, L: Deref> +pub struct SpvClient where L::Target: chain::Listen, { chain_tip: ValidatedBlockHeader, chain_poller: P, - chain_notifier: ChainNotifier<'a, C, L>, + chain_notifier: ChainNotifier, } /// The `Cache` trait defines behavior for managing a block header cache, where block headers are @@ -198,37 +194,83 @@ where /// Implementations may define how long to retain headers such that it's unlikely they will ever be /// needed to disconnect a block. In cases where block sources provide access to headers on stale /// forks reliably, caches may be entirely unnecessary. -pub trait Cache { +pub(crate) trait Cache { /// Retrieves the block header keyed by the given block hash. fn look_up(&self, block_hash: &BlockHash) -> Option<&ValidatedBlockHeader>; + /// Inserts the given block header during a find_difference operation, implying it might not be + /// the best header. + fn insert_during_diff(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader); + /// Called when a block has been connected to the best chain to ensure it is available to be /// disconnected later if needed. fn block_connected(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader); - /// Called when a block has been disconnected from the best chain. Once disconnected, a block's - /// header is no longer needed and thus can be removed. - fn block_disconnected(&mut self, block_hash: &BlockHash) -> Option; + /// Called when blocks have been disconnected from the best chain. Only the fork point + /// (best common ancestor) is provided. + /// + /// Once disconnected, a block's header is no longer needed and thus can be removed. + fn blocks_disconnected(&mut self, fork_point: &ValidatedBlockHeader); +} +
+/// Bounded cache of block headers keyed by block hash. +/// +/// Retains only roughly a week's worth (`6 * 24 * 7` blocks) of block headers based on height. 
+pub struct HeaderCache(std::collections::HashMap); + +impl HeaderCache { + /// Creates a new empty header cache. + pub fn new() -> Self { + Self(std::collections::HashMap::new()) + } } -/// Unbounded cache of block headers keyed by block hash. -pub type UnboundedCache = std::collections::HashMap; +impl Cache for HeaderCache { + fn look_up(&self, block_hash: &BlockHash) -> Option<&ValidatedBlockHeader> { + self.0.get(block_hash) + } + + fn insert_during_diff(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) { + self.0.insert(block_hash, block_header); + + // Remove headers older than our newest header minus a week. + let best_height = self.0.iter().map(|(_, header)| header.height).max().unwrap_or(0); + let cutoff_height = best_height.saturating_sub(6 * 24 * 7); + self.0.retain(|_, header| header.height >= cutoff_height); + } + + fn block_connected(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) { + self.0.insert(block_hash, block_header); + + // Remove headers older than a week. 
+ let cutoff_height = block_header.height.saturating_sub(6 * 24 * 7); + self.0.retain(|_, header| header.height >= cutoff_height); + } + + fn blocks_disconnected(&mut self, fork_point: &ValidatedBlockHeader) { + self.0.retain(|_, block_info| block_info.height < fork_point.height); + } +} -impl Cache for UnboundedCache { +impl Cache for &mut HeaderCache { fn look_up(&self, block_hash: &BlockHash) -> Option<&ValidatedBlockHeader> { - self.get(block_hash) + self.0.get(block_hash) + } + + fn insert_during_diff(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) { + (*self).insert_during_diff(block_hash, block_header); } fn block_connected(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) { - self.insert(block_hash, block_header); + (*self).block_connected(block_hash, block_header); } - fn block_disconnected(&mut self, block_hash: &BlockHash) -> Option { - self.remove(block_hash) + fn blocks_disconnected(&mut self, fork_point: &ValidatedBlockHeader) { + self.0.retain(|_, block_info| block_info.height < fork_point.height); } } -impl<'a, P: Poll, C: Cache, L: Deref> SpvClient<'a, P, C, L> +impl SpvClient where L::Target: chain::Listen, { @@ -243,7 +285,7 @@ where /// /// [`poll_best_tip`]: SpvClient::poll_best_tip pub fn new( - chain_tip: ValidatedBlockHeader, chain_poller: P, header_cache: &'a mut C, + chain_tip: ValidatedBlockHeader, chain_poller: P, header_cache: HeaderCache, chain_listener: L, ) -> Self { let chain_notifier = ChainNotifier { header_cache, chain_listener }; @@ -297,15 +339,15 @@ where /// Notifies [listeners] of blocks that have been connected or disconnected from the chain. /// /// [listeners]: lightning::chain::Listen -pub struct ChainNotifier<'a, C: Cache, L: Deref> +pub(crate) struct ChainNotifier where L::Target: chain::Listen, { /// Cache for looking up headers before fetching from a block source. 
- header_cache: &'a mut C, + pub(crate) header_cache: C, /// Listener that will be notified of connected or disconnected blocks. - chain_listener: L, + pub(crate) chain_listener: L, } /// Changes made to the chain between subsequent polls that transformed it from having one chain tip @@ -319,14 +361,11 @@ struct ChainDifference { /// If there are any disconnected blocks, this is where the chain forked. common_ancestor: ValidatedBlockHeader, - /// Blocks that were disconnected from the chain since the last poll. - disconnected_blocks: Vec, - /// Blocks that were connected to the chain since the last poll. connected_blocks: Vec, } -impl<'a, C: Cache, L: Deref> ChainNotifier<'a, C, L> +impl ChainNotifier where L::Target: chain::Listen, { @@ -342,23 +381,65 @@ where chain_poller: &mut P, ) -> Result<(), (BlockSourceError, Option)> { let difference = self - .find_difference(new_header, old_header, chain_poller) + .find_difference_from_header(new_header, old_header, chain_poller) .await .map_err(|e| (e, None))?; - self.disconnect_blocks(difference.disconnected_blocks); + if difference.common_ancestor != *old_header { + self.disconnect_blocks(difference.common_ancestor); + } self.connect_blocks(difference.common_ancestor, difference.connected_blocks, chain_poller) .await } + /// Returns the changes needed to produce the chain with `current_header` as its tip from the + /// chain with `prev_best_block` as its tip. + /// + /// First resolves `prev_best_block` to a `ValidatedBlockHeader` using the `previous_blocks` + /// field as fallback if needed, then finds the common ancestor. + async fn find_difference_from_best_block( + &mut self, current_header: ValidatedBlockHeader, prev_best_block: BestBlock, + chain_poller: &mut P, + ) -> BlockSourceResult { + // Try to resolve the header for the previous best block. First try the block_hash, + // then fall back to previous_blocks if that fails. 
+ let cur_tip = core::iter::once((0, &prev_best_block.block_hash)); + let prev_tips = prev_best_block.previous_blocks.iter().enumerate().filter_map(|(idx, hash_opt)| { + if let Some(block_hash) = hash_opt { + Some((idx as u32 + 1, block_hash)) + } else { + None + } + }); + let mut found_header = None; + for (height_diff, block_hash) in cur_tip.chain(prev_tips) { + if let Some(header) = self.header_cache.look_up(block_hash) { + found_header = Some(*header); + break; + } + let height = prev_best_block.height.checked_sub(height_diff).ok_or( + BlockSourceError::persistent("BestBlock had more previous_blocks than its height") + )?; + if let Ok(header) = chain_poller.get_header(block_hash, Some(height)).await { + found_header = Some(header); + self.header_cache.insert_during_diff(*block_hash, header); + break; + } + } + let found_header = found_header.ok_or_else(|| { + BlockSourceError::persistent("could not resolve any block from BestBlock") + })?; + + self.find_difference_from_header(current_header, &found_header, chain_poller).await + } + /// Returns the changes needed to produce the chain with `current_header` as its tip from the /// chain with `prev_header` as its tip. /// /// Walks backwards from `current_header` and `prev_header`, finding the common ancestor. 
- async fn find_difference( + async fn find_difference_from_header( &self, current_header: ValidatedBlockHeader, prev_header: &ValidatedBlockHeader, chain_poller: &mut P, ) -> BlockSourceResult { - let mut disconnected_blocks = Vec::new(); let mut connected_blocks = Vec::new(); let mut current = current_header; let mut previous = *prev_header; @@ -373,7 +454,6 @@ where let current_height = current.height; let previous_height = previous.height; if current_height <= previous_height { - disconnected_blocks.push(previous); previous = self.look_up_previous_header(chain_poller, &previous).await?; } if current_height >= previous_height { @@ -383,7 +463,7 @@ where } let common_ancestor = current; - Ok(ChainDifference { common_ancestor, disconnected_blocks, connected_blocks }) + Ok(ChainDifference { common_ancestor, connected_blocks }) } /// Returns the previous header for the given header, either by looking it up in the cache or @@ -398,16 +478,10 @@ where } /// Notifies the chain listeners of disconnected blocks. - fn disconnect_blocks(&mut self, disconnected_blocks: Vec) { - for header in disconnected_blocks.iter() { - if let Some(cached_header) = self.header_cache.block_disconnected(&header.block_hash) { - assert_eq!(cached_header, *header); - } - } - if let Some(block) = disconnected_blocks.last() { - let fork_point = BestBlock::new(block.header.prev_blockhash, block.height - 1); - self.chain_listener.blocks_disconnected(fork_point); - } + fn disconnect_blocks(&mut self, fork_point: ValidatedBlockHeader) { + self.header_cache.blocks_disconnected(&fork_point); + let best_block = BestBlock::new(fork_point.block_hash, fork_point.height); + self.chain_listener.blocks_disconnected(best_block); } /// Notifies the chain listeners of connected blocks. 
@@ -451,9 +525,9 @@ mod spv_client_tests { let best_tip = chain.at_height(1); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let mut cache = UnboundedCache::new(); + let cache = HeaderCache::new(); let mut listener = NullChainListener {}; - let mut client = SpvClient::new(best_tip, poller, &mut cache, &mut listener); + let mut client = SpvClient::new(best_tip, poller, cache, &mut listener); match client.poll_best_tip().await { Err(e) => { assert_eq!(e.kind(), BlockSourceErrorKind::Persistent); @@ -470,9 +544,9 @@ mod spv_client_tests { let common_tip = chain.tip(); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let mut cache = UnboundedCache::new(); + let cache = HeaderCache::new(); let mut listener = NullChainListener {}; - let mut client = SpvClient::new(common_tip, poller, &mut cache, &mut listener); + let mut client = SpvClient::new(common_tip, poller, cache, &mut listener); match client.poll_best_tip().await { Err(e) => panic!("Unexpected error: {:?}", e), Ok((chain_tip, blocks_connected)) => { @@ -490,9 +564,9 @@ mod spv_client_tests { let old_tip = chain.at_height(1); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let mut cache = UnboundedCache::new(); + let cache = HeaderCache::new(); let mut listener = NullChainListener {}; - let mut client = SpvClient::new(old_tip, poller, &mut cache, &mut listener); + let mut client = SpvClient::new(old_tip, poller, cache, &mut listener); match client.poll_best_tip().await { Err(e) => panic!("Unexpected error: {:?}", e), Ok((chain_tip, blocks_connected)) => { @@ -510,9 +584,9 @@ mod spv_client_tests { let old_tip = chain.at_height(1); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let mut cache = UnboundedCache::new(); + let cache = HeaderCache::new(); let mut listener = NullChainListener {}; - let mut client = SpvClient::new(old_tip, poller, &mut cache, &mut listener); + let mut client = SpvClient::new(old_tip, poller, cache, &mut 
listener); match client.poll_best_tip().await { Err(e) => panic!("Unexpected error: {:?}", e), Ok((chain_tip, blocks_connected)) => { @@ -530,9 +604,9 @@ mod spv_client_tests { let old_tip = chain.at_height(1); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let mut cache = UnboundedCache::new(); + let cache = HeaderCache::new(); let mut listener = NullChainListener {}; - let mut client = SpvClient::new(old_tip, poller, &mut cache, &mut listener); + let mut client = SpvClient::new(old_tip, poller, cache, &mut listener); match client.poll_best_tip().await { Err(e) => panic!("Unexpected error: {:?}", e), Ok((chain_tip, blocks_connected)) => { @@ -551,9 +625,9 @@ mod spv_client_tests { let worse_tip = chain.tip(); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let mut cache = UnboundedCache::new(); + let cache = HeaderCache::new(); let mut listener = NullChainListener {}; - let mut client = SpvClient::new(best_tip, poller, &mut cache, &mut listener); + let mut client = SpvClient::new(best_tip, poller, cache, &mut listener); match client.poll_best_tip().await { Err(e) => panic!("Unexpected error: {:?}", e), Ok((chain_tip, blocks_connected)) => { diff --git a/lightning-block-sync/src/poll.rs b/lightning-block-sync/src/poll.rs index 843cc961899..fd8c546c56f 100644 --- a/lightning-block-sync/src/poll.rs +++ b/lightning-block-sync/src/poll.rs @@ -1,14 +1,12 @@ //! Adapters that make one or more [`BlockSource`]s simpler to poll for new chain tip transitions. 
-use crate::{ - AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, BlockSourceError, - BlockSourceResult, -}; +use crate::{BlockData, BlockHeaderData, BlockSource, BlockSourceError, BlockSourceResult}; use bitcoin::hash_types::BlockHash; use bitcoin::network::Network; use lightning::chain::BestBlock; +use std::future::Future; use std::ops::Deref; /// The `Poll` trait defines behavior for polling block sources for a chain tip and retrieving @@ -22,17 +20,22 @@ pub trait Poll { /// Returns a chain tip in terms of its relationship to the provided chain tip. fn poll_chain_tip<'a>( &'a self, best_known_chain_tip: ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ChainTip>; + ) -> impl Future> + Send + 'a; /// Returns the header that preceded the given header in the chain. fn look_up_previous_header<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlockHeader>; + ) -> impl Future> + Send + 'a; /// Returns the block associated with the given header. fn fetch_block<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlock>; + ) -> impl Future> + Send + 'a; + + /// Returns the header for a given hash and optional height hint. + fn get_header<'a>( + &'a self, block_hash: &'a BlockHash, height_hint: Option, + ) -> impl Future> + Send + 'a; } /// A chain tip relative to another chain tip in terms of block hash and chainwork. 
@@ -217,8 +220,8 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll { fn poll_chain_tip<'a>( &'a self, best_known_chain_tip: ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ChainTip> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let (block_hash, height) = self.block_source.get_best_block().await?; if block_hash == best_known_chain_tip.header.block_hash() { return Ok(ChainTip::Common); @@ -231,13 +234,13 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll } else { Ok(ChainTip::Worse(chain_tip)) } - }) + } } fn look_up_previous_header<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlockHeader> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { if header.height == 0 { return Err(BlockSourceError::persistent("genesis block reached")); } @@ -252,14 +255,20 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll header.check_builds_on(&previous_header, self.network)?; Ok(previous_header) - }) + } } fn fetch_block<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlock> { + ) -> impl Future> + Send + 'a { + async move { self.block_source.get_block(&header.block_hash).await?.validate(header.block_hash) } + } + + fn get_header<'a>( + &'a self, block_hash: &'a BlockHash, height_hint: Option, + ) -> impl Future> + Send + 'a { Box::pin(async move { - self.block_source.get_block(&header.block_hash).await?.validate(header.block_hash) + self.block_source.get_header(block_hash, height_hint).await?.validate(*block_hash) }) } } diff --git a/lightning-block-sync/src/rest.rs b/lightning-block-sync/src/rest.rs index 1f79ab4a0b0..619981bb4d0 100644 --- a/lightning-block-sync/src/rest.rs +++ b/lightning-block-sync/src/rest.rs @@ -4,13 +4,14 @@ use crate::convert::GetUtxosResponse; use crate::gossip::UtxoSource; use crate::http::{BinaryResponse, HttpClient, HttpEndpoint, JsonResponse}; -use 
crate::{AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource}; +use crate::{BlockData, BlockHeaderData, BlockSource, BlockSourceResult}; use bitcoin::hash_types::BlockHash; use bitcoin::OutPoint; use std::convert::TryFrom; use std::convert::TryInto; +use std::future::Future; use std::sync::Mutex; /// A simple REST client for requesting resources using HTTP `GET`. @@ -49,49 +50,51 @@ impl RestClient { impl BlockSource for RestClient { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, _height: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("headers/1/{}.json", header_hash.to_string()); Ok(self.request_resource::(&resource_path).await?) - }) + } } fn get_block<'a>( &'a self, header_hash: &'a BlockHash, - ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("block/{}.bin", header_hash.to_string()); Ok(BlockData::FullBlock( self.request_resource::(&resource_path).await?, )) - }) + } } - fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<'a, (BlockHash, Option)> { - Box::pin( - async move { Ok(self.request_resource::("chaininfo.json").await?) }, - ) + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a { + async move { Ok(self.request_resource::("chaininfo.json").await?) } } } impl UtxoSource for RestClient { fn get_block_hash_by_height<'a>( &'a self, block_height: u32, - ) -> AsyncBlockSourceResult<'a, BlockHash> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("blockhashbyheight/{}.bin", block_height); Ok(self.request_resource::(&resource_path).await?) 
- }) + } } - fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool> { - Box::pin(async move { + fn is_output_unspent<'a>( + &'a self, outpoint: OutPoint, + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("getutxos/{}-{}.json", outpoint.txid.to_string(), outpoint.vout); let utxo_result = self.request_resource::(&resource_path).await?; Ok(utxo_result.hit_bitmap_nonempty) - }) + } } } diff --git a/lightning-block-sync/src/rpc.rs b/lightning-block-sync/src/rpc.rs index 3df50a2267b..d851ba2ccf0 100644 --- a/lightning-block-sync/src/rpc.rs +++ b/lightning-block-sync/src/rpc.rs @@ -3,7 +3,7 @@ use crate::gossip::UtxoSource; use crate::http::{HttpClient, HttpEndpoint, HttpError, JsonResponse}; -use crate::{AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource}; +use crate::{BlockData, BlockHeaderData, BlockSource, BlockSourceResult}; use bitcoin::hash_types::BlockHash; use bitcoin::OutPoint; @@ -16,6 +16,7 @@ use std::convert::TryFrom; use std::convert::TryInto; use std::error::Error; use std::fmt; +use std::future::Future; use std::sync::atomic::{AtomicUsize, Ordering}; /// An error returned by the RPC server. @@ -135,47 +136,51 @@ impl RpcClient { impl BlockSource for RpcClient { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, _height: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let header_hash = serde_json::json!(header_hash.to_string()); Ok(self.call_method("getblockheader", &[header_hash]).await?) 
- }) + } } fn get_block<'a>( &'a self, header_hash: &'a BlockHash, - ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let header_hash = serde_json::json!(header_hash.to_string()); let verbosity = serde_json::json!(0); Ok(BlockData::FullBlock(self.call_method("getblock", &[header_hash, verbosity]).await?)) - }) + } } - fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<'a, (BlockHash, Option)> { - Box::pin(async move { Ok(self.call_method("getblockchaininfo", &[]).await?) }) + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a { + async move { Ok(self.call_method("getblockchaininfo", &[]).await?) } } } impl UtxoSource for RpcClient { fn get_block_hash_by_height<'a>( &'a self, block_height: u32, - ) -> AsyncBlockSourceResult<'a, BlockHash> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let height_param = serde_json::json!(block_height); Ok(self.call_method("getblockhash", &[height_param]).await?) 
- }) + } } - fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool> { - Box::pin(async move { + fn is_output_unspent<'a>( + &'a self, outpoint: OutPoint, + ) -> impl Future> + Send + 'a { + async move { let txid_param = serde_json::json!(outpoint.txid.to_string()); let vout_param = serde_json::json!(outpoint.vout); let include_mempool = serde_json::json!(false); let utxo_opt: serde_json::Value = self.call_method("gettxout", &[txid_param, vout_param, include_mempool]).await?; Ok(!utxo_opt.is_null()) - }) + } } } diff --git a/lightning-block-sync/src/test_utils.rs b/lightning-block-sync/src/test_utils.rs index d307c4506eb..89cb3e81d60 100644 --- a/lightning-block-sync/src/test_utils.rs +++ b/lightning-block-sync/src/test_utils.rs @@ -1,7 +1,7 @@ use crate::poll::{Validate, ValidatedBlockHeader}; use crate::{ - AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, BlockSourceError, - UnboundedCache, + BlockData, BlockHeaderData, BlockSource, BlockSourceError, BlockSourceResult, Cache, + HeaderCache, }; use bitcoin::block::{Block, Header, Version}; @@ -17,6 +17,7 @@ use lightning::chain::BestBlock; use std::cell::RefCell; use std::collections::VecDeque; +use std::future::Future; #[derive(Default)] pub struct Blockchain { @@ -104,6 +105,18 @@ impl Blockchain { block_header.validate(block_hash).unwrap() } + pub fn best_block_at_height(&self, height: usize) -> BestBlock { + let mut previous_blocks = [None; 12]; + for (i, height) in (0..height).rev().take(12).enumerate() { + previous_blocks[i] = Some(self.blocks[height].block_hash()); + } + BestBlock { + height: height as u32, + block_hash: self.blocks[height].block_hash(), + previous_blocks, + } + } + fn at_height_unvalidated(&self, height: usize) -> BlockHeaderData { assert!(!self.blocks.is_empty()); assert!(height < self.blocks.len()); @@ -123,16 +136,21 @@ impl Blockchain { self.at_height(self.blocks.len() - 1) } + pub fn best_block(&self) -> BestBlock { + 
assert!(!self.blocks.is_empty()); + self.best_block_at_height(self.blocks.len() - 1) + } + pub fn disconnect_tip(&mut self) -> Option { self.blocks.pop() } - pub fn header_cache(&self, heights: std::ops::RangeInclusive) -> UnboundedCache { - let mut cache = UnboundedCache::new(); + pub fn header_cache(&self, heights: std::ops::RangeInclusive) -> HeaderCache { + let mut cache = HeaderCache::new(); for i in heights { let value = self.at_height(i); let key = value.header.block_hash(); - assert!(cache.insert(key, value).is_none()); + cache.block_connected(key, value); } cache } @@ -141,8 +159,8 @@ impl Blockchain { impl BlockSource for Blockchain { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, _height_hint: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { if self.without_headers { return Err(BlockSourceError::persistent("header not found")); } @@ -158,13 +176,13 @@ impl BlockSource for Blockchain { } } Err(BlockSourceError::transient("header not found")) - }) + } } fn get_block<'a>( &'a self, header_hash: &'a BlockHash, - ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { for (height, block) in self.blocks.iter().enumerate() { if block.header.block_hash() == *header_hash { if let Some(without_blocks) = &self.without_blocks { @@ -181,11 +199,13 @@ impl BlockSource for Blockchain { } } Err(BlockSourceError::transient("block not found")) - }) + } } - fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<'a, (BlockHash, Option)> { - Box::pin(async move { + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a { + async move { match self.blocks.last() { None => Err(BlockSourceError::transient("empty chain")), Some(block) => { @@ -193,7 +213,7 @@ impl BlockSource for Blockchain { Ok((block.block_hash(), Some(height))) }, } - }) + } } } diff --git a/lightning-invoice/src/lib.rs 
b/lightning-invoice/src/lib.rs index 47f929377de..60d413cf76a 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -40,7 +40,6 @@ use bitcoin::secp256k1::ecdsa::RecoverableSignature; use bitcoin::secp256k1::PublicKey; use bitcoin::secp256k1::{Message, Secp256k1}; -use alloc::boxed::Box; use alloc::string; use core::cmp::Ordering; use core::fmt::{self, Display, Formatter}; @@ -1081,8 +1080,8 @@ macro_rules! find_all_extract { #[allow(missing_docs)] impl RawBolt11Invoice { /// Hash the HRP (as bytes) and signatureless data part (as Fe32 iterator) - fn hash_from_parts<'s>( - hrp_bytes: &[u8], data_without_signature: Box + 's>, + fn hash_from_parts<'s, I: Iterator + 's>( + hrp_bytes: &[u8], data_without_signature: I, ) -> [u8; 32] { use crate::bech32::Fe32IterExt; use bitcoin::hashes::HashEngine; diff --git a/lightning-invoice/src/ser.rs b/lightning-invoice/src/ser.rs index 5c93fa84ae0..853accdd3ca 100644 --- a/lightning-invoice/src/ser.rs +++ b/lightning-invoice/src/ser.rs @@ -1,4 +1,3 @@ -use alloc::boxed::Box; use core::fmt; use core::fmt::{Display, Formatter}; use core::{array, iter}; @@ -13,14 +12,28 @@ use super::{ SignedRawBolt11Invoice, TaggedField, }; +macro_rules! define_iterator_enum { + ($name: ident, $($n: ident),*) => { + enum $name<$($n: Iterator,)*> { + $($n($n),)* + } + impl<$($n: Iterator,)*> Iterator for $name<$($n,)*> { + type Item = Fe32; + fn next(&mut self) -> Option { + match self { + $(Self::$n(iter) => iter.next(),)* + } + } + } + } +} + /// Objects that can be encoded to base32 (bech32). /// -/// Private to this crate to avoid polluting the API. +/// Private to this crate (except in fuzzing) to avoid polluting the API. pub trait Base32Iterable { - /// apoelstra: In future we want to replace this Box with an explicit - /// associated type, to avoid the allocation. But we cannot do this until - /// Rust 1.65 and GATs since the iterator may contain a reference to self. 
- fn fe_iter<'s>(&'s self) -> Box + 's>; + /// Serialize this object, returning an iterator over bech32 field elements. + fn fe_iter<'s>(&'s self) -> impl Iterator + 's; } /// Interface to calculate the length of the base32 representation before actually serializing @@ -32,7 +45,7 @@ pub(crate) trait Base32Len: Base32Iterable { // Base32Iterable & Base32Len implementations are here, because the traits are in this module. impl Base32Iterable for [u8; N] { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { self[..].fe_iter() } } @@ -45,8 +58,8 @@ impl Base32Len for [u8; N] { } impl Base32Iterable for [u8] { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.iter().copied().bytes_to_fes()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.iter().copied().bytes_to_fes() } } @@ -58,8 +71,8 @@ impl Base32Len for [u8] { } impl Base32Iterable for Vec { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.iter().copied().bytes_to_fes()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.iter().copied().bytes_to_fes() } } @@ -71,8 +84,8 @@ impl Base32Len for Vec { } impl Base32Iterable for PaymentSecret { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.0[..].fe_iter()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0[..].fe_iter() } } @@ -88,7 +101,7 @@ impl Base32Iterable for Bolt11InvoiceFeatures { /// starting from the rightmost bit, /// and taking the resulting 5-bit values in reverse (left-to-right), /// with the leading 0's skipped. 
- fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { // Fe32 conversion cannot be used, because this packs from right, right-to-left let mut input_iter = self.le_flags().iter(); // Carry bits, 0..7 bits @@ -126,7 +139,7 @@ impl Base32Iterable for Bolt11InvoiceFeatures { output.push(Fe32::try_from(next_out8 & 31u8).expect("<32")) } // Take result in reverse order, and skip leading 0s - Box::new(output.into_iter().rev().skip_while(|e| *e == Fe32::Q)) + output.into_iter().rev().skip_while(|e| *e == Fe32::Q) } } @@ -241,36 +254,35 @@ fn encoded_int_be_base32_size(int: u64) -> usize { } impl Base32Iterable for RawDataPart { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { let ts_iter = self.timestamp.fe_iter(); let fields_iter = self.tagged_fields.iter().map(RawTaggedField::fe_iter).flatten(); - Box::new(ts_iter.chain(fields_iter)) + ts_iter.chain(fields_iter) } } impl Base32Iterable for PositiveTimestamp { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { let fes = encode_int_be_base32(self.as_unix_timestamp()); debug_assert!(fes.len() <= 7, "Invalid timestamp length"); let to_pad = 7 - fes.len(); - Box::new(core::iter::repeat(Fe32::Q).take(to_pad).chain(fes)) + core::iter::repeat(Fe32::Q).take(to_pad).chain(fes) } } impl Base32Iterable for RawTaggedField { - fn fe_iter<'s>(&'s self) -> Box + 's> { - // Annoyingly, when we move to explicit types, we will need an - // explicit enum holding the two iterator variants. 
+ fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + define_iterator_enum!(TwoIters, A, B); match *self { - RawTaggedField::UnknownSemantics(ref content) => Box::new(content.iter().copied()), - RawTaggedField::KnownSemantics(ref tagged_field) => tagged_field.fe_iter(), + RawTaggedField::UnknownSemantics(ref content) => TwoIters::A(content.iter().copied()), + RawTaggedField::KnownSemantics(ref tagged_field) => TwoIters::B(tagged_field.fe_iter()), } } } impl Base32Iterable for Sha256 { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.0[..].fe_iter()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0[..].fe_iter() } } @@ -281,8 +293,8 @@ impl Base32Len for Sha256 { } impl Base32Iterable for Description { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.0 .0.as_bytes().fe_iter()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0 .0.as_bytes().fe_iter() } } @@ -293,8 +305,8 @@ impl Base32Len for Description { } impl Base32Iterable for PayeePubKey { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.serialize().into_iter().bytes_to_fes()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.serialize().into_iter().bytes_to_fes() } } @@ -305,8 +317,8 @@ impl Base32Len for PayeePubKey { } impl Base32Iterable for ExpiryTime { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(encode_int_be_base32(self.as_seconds())) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + encode_int_be_base32(self.as_seconds()) } } @@ -317,8 +329,8 @@ impl Base32Len for ExpiryTime { } impl Base32Iterable for MinFinalCltvExpiryDelta { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(encode_int_be_base32(self.0)) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + encode_int_be_base32(self.0) } } @@ -329,8 +341,8 @@ impl Base32Len for MinFinalCltvExpiryDelta { } impl Base32Iterable for Fallback { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(match *self { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + match *self { 
Fallback::SegWitProgram { version: v, program: ref p } => { let v = Fe32::try_from(v.to_num()).expect("valid version"); core::iter::once(v).chain(p[..].fe_iter()) @@ -343,7 +355,7 @@ impl Base32Iterable for Fallback { // 18 'J' core::iter::once(Fe32::J).chain(hash[..].fe_iter()) }, - }) + } } } @@ -371,7 +383,7 @@ type RouteHintHopIter = iter::Chain< >; impl Base32Iterable for PrivateRoute { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { fn serialize_to_iter(hop: &RouteHintHop) -> RouteHintHopIter { let i1 = hop.src_node_id.serialize().into_iter(); let i2 = u64::to_be_bytes(hop.short_channel_id).into_iter(); @@ -381,7 +393,7 @@ impl Base32Iterable for PrivateRoute { i1.chain(i2).chain(i3).chain(i4).chain(i5) } - Box::new(self.0 .0.iter().map(serialize_to_iter).flatten().bytes_to_fes()) + self.0 .0.iter().map(serialize_to_iter).flatten().bytes_to_fes() } } @@ -391,16 +403,11 @@ impl Base32Len for PrivateRoute { } } -// Shorthand type -type TaggedFieldIter = core::iter::Chain, I>; - impl Base32Iterable for TaggedField { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { /// Writes a tagged field: tag, length and data. `tag` should be in `0..32` otherwise the /// function will panic. 
- fn write_tagged_field<'s, P>( - tag: u8, payload: &'s P, - ) -> TaggedFieldIter + 's>> + fn write_tagged_field<'s, P>(tag: u8, payload: &'s P) -> impl Iterator + 's where P: Base32Iterable + Base32Len + ?Sized, { @@ -416,54 +423,49 @@ impl Base32Iterable for TaggedField { .chain(payload.fe_iter()) } - // we will also need a giant enum for this - Box::new(match *self { + define_iterator_enum!(ManyIters, A, B, C, D, E, F, G, H, I, J, K); + match *self { TaggedField::PaymentHash(ref hash) => { - write_tagged_field(constants::TAG_PAYMENT_HASH, hash) + ManyIters::A(write_tagged_field(constants::TAG_PAYMENT_HASH, hash)) }, TaggedField::Description(ref description) => { - write_tagged_field(constants::TAG_DESCRIPTION, description) + ManyIters::B(write_tagged_field(constants::TAG_DESCRIPTION, description)) }, TaggedField::PayeePubKey(ref pub_key) => { - write_tagged_field(constants::TAG_PAYEE_PUB_KEY, pub_key) + ManyIters::C(write_tagged_field(constants::TAG_PAYEE_PUB_KEY, pub_key)) }, TaggedField::DescriptionHash(ref hash) => { - write_tagged_field(constants::TAG_DESCRIPTION_HASH, hash) + ManyIters::D(write_tagged_field(constants::TAG_DESCRIPTION_HASH, hash)) }, TaggedField::ExpiryTime(ref duration) => { - write_tagged_field(constants::TAG_EXPIRY_TIME, duration) + ManyIters::E(write_tagged_field(constants::TAG_EXPIRY_TIME, duration)) }, TaggedField::MinFinalCltvExpiryDelta(ref expiry) => { - write_tagged_field(constants::TAG_MIN_FINAL_CLTV_EXPIRY_DELTA, expiry) + ManyIters::F(write_tagged_field(constants::TAG_MIN_FINAL_CLTV_EXPIRY_DELTA, expiry)) }, TaggedField::Fallback(ref fallback_address) => { - write_tagged_field(constants::TAG_FALLBACK, fallback_address) + ManyIters::G(write_tagged_field(constants::TAG_FALLBACK, fallback_address)) }, TaggedField::PrivateRoute(ref route_hops) => { - write_tagged_field(constants::TAG_PRIVATE_ROUTE, route_hops) + ManyIters::H(write_tagged_field(constants::TAG_PRIVATE_ROUTE, route_hops)) }, TaggedField::PaymentSecret(ref 
payment_secret) => { - write_tagged_field(constants::TAG_PAYMENT_SECRET, payment_secret) + ManyIters::I(write_tagged_field(constants::TAG_PAYMENT_SECRET, payment_secret)) }, TaggedField::PaymentMetadata(ref payment_metadata) => { - write_tagged_field(constants::TAG_PAYMENT_METADATA, payment_metadata) + ManyIters::J(write_tagged_field(constants::TAG_PAYMENT_METADATA, payment_metadata)) }, TaggedField::Features(ref features) => { - write_tagged_field(constants::TAG_FEATURES, features) + ManyIters::K(write_tagged_field(constants::TAG_FEATURES, features)) }, - }) + } } } impl Base32Iterable for Bolt11InvoiceSignature { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { let (recovery_id, signature) = self.0.serialize_compact(); - Box::new( - signature - .into_iter() - .chain(core::iter::once(recovery_id.to_i32() as u8)) - .bytes_to_fes(), - ) + signature.into_iter().chain(core::iter::once(recovery_id.to_i32() as u8)).bytes_to_fes() } } diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index a6736e63eef..5c4bd63bc48 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -9,7 +9,6 @@ //! Contains the main bLIP-52 / LSPS2 server-side object, [`LSPS2ServiceHandler`]. 
-use alloc::boxed::Box; use alloc::string::{String, ToString}; use alloc::vec::Vec; use lightning::util::persist::KVStore; @@ -17,6 +16,7 @@ use lightning::util::persist::KVStore; use core::cmp::Ordering as CmpOrdering; use core::future::Future as StdFuture; use core::ops::Deref; +use core::pin::pin; use core::sync::atomic::{AtomicUsize, Ordering}; use core::task; @@ -2173,7 +2173,7 @@ where &self, counterparty_node_id: &PublicKey, request_id: LSPSRequestId, intercept_scid: u64, cltv_expiry_delta: u32, client_trusts_lsp: bool, user_channel_id: u128, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.invoice_parameters_generated( + let mut fut = pin!(self.inner.invoice_parameters_generated( counterparty_node_id, request_id, intercept_scid, @@ -2202,7 +2202,7 @@ where &self, intercept_scid: u64, intercept_id: InterceptId, expected_outbound_amount_msat: u64, payment_hash: PaymentHash, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.htlc_intercepted( + let mut fut = pin!(self.inner.htlc_intercepted( intercept_scid, intercept_id, expected_outbound_amount_msat, @@ -2228,7 +2228,7 @@ where pub fn htlc_handling_failed( &self, failure_type: HTLCHandlingFailureType, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.htlc_handling_failed(failure_type)); + let mut fut = pin!(self.inner.htlc_handling_failed(failure_type)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2249,7 +2249,7 @@ where pub fn payment_forwarded( &self, next_channel_id: ChannelId, skimmed_fee_msat: u64, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.payment_forwarded(next_channel_id, skimmed_fee_msat)); + let mut fut = pin!(self.inner.payment_forwarded(next_channel_id, skimmed_fee_msat)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2290,7 +2290,7 @@ where &self, counterparty_node_id: &PublicKey, user_channel_id: u128, ) -> Result<(), APIError> { let mut fut = - 
Box::pin(self.inner.channel_open_abandoned(counterparty_node_id, user_channel_id)); + pin!(self.inner.channel_open_abandoned(counterparty_node_id, user_channel_id)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2309,8 +2309,7 @@ where pub fn channel_open_failed( &self, counterparty_node_id: &PublicKey, user_channel_id: u128, ) -> Result<(), APIError> { - let mut fut = - Box::pin(self.inner.channel_open_failed(counterparty_node_id, user_channel_id)); + let mut fut = pin!(self.inner.channel_open_failed(counterparty_node_id, user_channel_id)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2332,7 +2331,7 @@ where &self, user_channel_id: u128, channel_id: &ChannelId, counterparty_node_id: &PublicKey, ) -> Result<(), APIError> { let mut fut = - Box::pin(self.inner.channel_ready(user_channel_id, channel_id, counterparty_node_id)); + pin!(self.inner.channel_ready(user_channel_id, channel_id, counterparty_node_id)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); diff --git a/lightning-liquidity/src/manager.rs b/lightning-liquidity/src/manager.rs index f0143fc624f..d3822715b8d 100644 --- a/lightning-liquidity/src/manager.rs +++ b/lightning-liquidity/src/manager.rs @@ -7,7 +7,6 @@ // You may not use this file except in accordance with one or both of these // licenses. 
-use alloc::boxed::Box; use alloc::string::ToString; use alloc::vec::Vec; @@ -61,6 +60,7 @@ use bitcoin::secp256k1::PublicKey; use core::future::Future as StdFuture; use core::ops::Deref; +use core::pin::pin; use core::task; const LSPS_FEATURE_BIT: usize = 729; @@ -1106,7 +1106,7 @@ where ) -> Result { let kv_store = KVStoreSyncWrapper(kv_store_sync); - let mut fut = Box::pin(LiquidityManager::new( + let mut fut = pin!(LiquidityManager::new( entropy_source, node_signer, channel_manager, @@ -1159,7 +1159,7 @@ where client_config: Option, time_provider: TP, ) -> Result { let kv_store = KVStoreSyncWrapper(kv_store_sync); - let mut fut = Box::pin(LiquidityManager::new_with_custom_time_provider( + let mut fut = pin!(LiquidityManager::new_with_custom_time_provider( entropy_source, node_signer, channel_manager, @@ -1289,7 +1289,7 @@ where pub fn persist(&self) -> Result<(), lightning::io::Error> { let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - match Box::pin(self.inner.persist()).as_mut().poll(&mut ctx) { + match pin!(self.inner.persist()).as_mut().poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { // In a sync context, we can't wait for the future to complete. diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 068f77a84bb..c6fbd3dc3c5 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -43,7 +43,7 @@ use std::hash::Hash; use std::net::SocketAddr; use std::net::TcpStream as StdTcpStream; use std::ops::Deref; -use std::pin::Pin; +use std::pin::{pin, Pin}; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::task::{self, Poll}; @@ -205,18 +205,17 @@ impl Connection { } us_lock.read_paused }; - // TODO: Drop the Box'ing of the futures once Rust has pin-on-stack support. 
let select_result = if read_paused { TwoSelector { - a: Box::pin(write_avail_receiver.recv()), - b: Box::pin(read_wake_receiver.recv()), + a: pin!(write_avail_receiver.recv()), + b: pin!(read_wake_receiver.recv()), } .await } else { ThreeSelector { - a: Box::pin(write_avail_receiver.recv()), - b: Box::pin(read_wake_receiver.recv()), - c: Box::pin(reader.readable()), + a: pin!(write_avail_receiver.recv()), + b: pin!(read_wake_receiver.recv()), + c: pin!(reader.readable()), } .await }; diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs index 9b15398d4d1..b2d327f6bc1 100644 --- a/lightning-persister/src/fs_store.rs +++ b/lightning-persister/src/fs_store.rs @@ -14,8 +14,6 @@ use std::sync::{Arc, Mutex, RwLock}; #[cfg(feature = "tokio")] use core::future::Future; #[cfg(feature = "tokio")] -use core::pin::Pin; -#[cfg(feature = "tokio")] use lightning::util::persist::KVStore; #[cfg(target_os = "windows")] @@ -464,93 +462,85 @@ impl FilesystemStoreInner { impl KVStore for FilesystemStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> Pin, lightning::io::Error>> + 'static + Send>> { + ) -> impl Future, lightning::io::Error>> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( + let path = this.get_checked_dest_file_path( primary_namespace, secondary_namespace, Some(key), "read", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; + ); - Box::pin(async move { + async move { + let path = match path { + Ok(path) => path, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || this.read(path)).await.unwrap_or_else(|e| { Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e)) }) - }) + } } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> Pin> + 'static + Send>> { + ) -> impl Future> + 'static + Send { let this = Arc::clone(&self.inner); - let path = 
match this.get_checked_dest_file_path( - primary_namespace, - secondary_namespace, - Some(key), - "write", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; - - let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(path.clone()); - Box::pin(async move { + let path = this + .get_checked_dest_file_path(primary_namespace, secondary_namespace, Some(key), "write") + .map(|path| (self.get_new_version_and_lock_ref(path.clone()), path)); + + async move { + let ((inner_lock_ref, version), path) = match path { + Ok(res) => res, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || { this.write_version(inner_lock_ref, path, buf, version) }) .await .unwrap_or_else(|e| Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e))) - }) + } } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> Pin> + 'static + Send>> { + ) -> impl Future> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( - primary_namespace, - secondary_namespace, - Some(key), - "remove", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; - - let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(path.clone()); - Box::pin(async move { + let path = this + .get_checked_dest_file_path(primary_namespace, secondary_namespace, Some(key), "remove") + .map(|path| (self.get_new_version_and_lock_ref(path.clone()), path)); + + async move { + let ((inner_lock_ref, version), path) = match path { + Ok(res) => res, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || { this.remove_version(inner_lock_ref, path, lazy, version) }) .await .unwrap_or_else(|e| Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e))) - }) + } } fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> Pin, lightning::io::Error>> + 'static + Send>> { + ) -> impl Future, lightning::io::Error>> + 
'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( - primary_namespace, - secondary_namespace, - None, - "list", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; + let path = + this.get_checked_dest_file_path(primary_namespace, secondary_namespace, None, "list"); - Box::pin(async move { + async move { + let path = match path { + Ok(path) => path, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || this.list(path)).await.unwrap_or_else(|e| { Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e)) }) - }) + } } } @@ -758,24 +748,24 @@ mod tests { let fs_store = Arc::new(FilesystemStore::new(temp_path)); assert_eq!(fs_store.state_size(), 0); - let async_fs_store: Arc = fs_store.clone(); + let async_fs_store = Arc::clone(&fs_store); let data1 = vec![42u8; 32]; let data2 = vec![43u8; 32]; - let primary_namespace = "testspace"; - let secondary_namespace = "testsubspace"; + let primary = "testspace"; + let secondary = "testsubspace"; let key = "testkey"; // Test writing the same key twice with different data. Execute the asynchronous part out of order to ensure // that eventual consistency works. - let fut1 = async_fs_store.write(primary_namespace, secondary_namespace, key, data1); + let fut1 = KVStore::write(&*async_fs_store, primary, secondary, key, data1); assert_eq!(fs_store.state_size(), 1); - let fut2 = async_fs_store.remove(primary_namespace, secondary_namespace, key, false); + let fut2 = KVStore::remove(&*async_fs_store, primary, secondary, key, false); assert_eq!(fs_store.state_size(), 1); - let fut3 = async_fs_store.write(primary_namespace, secondary_namespace, key, data2.clone()); + let fut3 = KVStore::write(&*async_fs_store, primary, secondary, key, data2.clone()); assert_eq!(fs_store.state_size(), 1); fut3.await.unwrap(); @@ -788,21 +778,18 @@ mod tests { assert_eq!(fs_store.state_size(), 0); // Test list. 
- let listed_keys = - async_fs_store.list(primary_namespace, secondary_namespace).await.unwrap(); + let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap(); assert_eq!(listed_keys.len(), 1); assert_eq!(listed_keys[0], key); // Test read. We expect to read data2, as the write call was initiated later. - let read_data = - async_fs_store.read(primary_namespace, secondary_namespace, key).await.unwrap(); + let read_data = KVStore::read(&*async_fs_store, primary, secondary, key).await.unwrap(); assert_eq!(data2, &*read_data); // Test remove. - async_fs_store.remove(primary_namespace, secondary_namespace, key, false).await.unwrap(); + KVStore::remove(&*async_fs_store, primary, secondary, key, false).await.unwrap(); - let listed_keys = - async_fs_store.list(primary_namespace, secondary_namespace).await.unwrap(); + let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap(); assert_eq!(listed_keys.len(), 0); } diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index d8d6c90921f..32f36fb3a78 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -51,10 +51,9 @@ use crate::sign::ecdsa::EcdsaChannelSigner; use crate::sign::{EntropySource, PeerStorageKey, SignerProvider}; use crate::sync::{Mutex, MutexGuard, RwLock, RwLockReadGuard}; use crate::types::features::{InitFeatures, NodeFeatures}; -use crate::util::async_poll::{MaybeSend, MaybeSync}; use crate::util::errors::APIError; use crate::util::logger::{Logger, WithContext}; -use crate::util::native_async::FutureSpawner; +use crate::util::native_async::{FutureSpawner, MaybeSend, MaybeSync}; use crate::util::persist::{KVStore, MonitorName, MonitorUpdatingPersisterAsync}; #[cfg(peer_storage)] use crate::util::ser::{VecWriter, Writeable}; diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 1d035b68650..e2ea5855576 100644 --- 
a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1059,7 +1059,7 @@ impl Readable for IrrevocablyResolvedHTLC { /// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date /// information and are actively monitoring the chain. /// -/// Like the [`ChannelManager`], deserialization is implemented for `(BlockHash, ChannelMonitor)`, +/// Like the [`ChannelManager`], deserialization is implemented for `(BestBlock, ChannelMonitor)`, /// providing you with the last block hash which was connected before shutting down. You must begin /// syncing the chain from that point, disconnecting and connecting blocks as required to get to /// the best chain on startup. Note that all [`ChannelMonitor`]s passed to a [`ChainMonitor`] must @@ -1067,7 +1067,7 @@ impl Readable for IrrevocablyResolvedHTLC { /// initialization. /// /// For those loading potentially-ancient [`ChannelMonitor`]s, deserialization is also implemented -/// for `Option<(BlockHash, ChannelMonitor)>`. LDK can no longer deserialize a [`ChannelMonitor`] +/// for `Option<(BestBlock, ChannelMonitor)>`. LDK can no longer deserialize a [`ChannelMonitor`] /// that was first created in LDK prior to 0.0.110 and last updated prior to LDK 0.0.119. In such /// cases, the `Option<(..)>` deserialization option may return `Ok(None)` rather than failing to /// deserialize, allowing you to differentiate between the two cases. 
@@ -6420,7 +6420,7 @@ where const MAX_ALLOC_SIZE: usize = 64 * 1024; impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)> - for (BlockHash, ChannelMonitor) + for (BestBlock, ChannelMonitor) { fn read(reader: &mut R, args: (&'a ES, &'b SP)) -> Result { match >::read(reader, args) { @@ -6432,7 +6432,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP } impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)> - for Option<(BlockHash, ChannelMonitor)> + for Option<(BestBlock, ChannelMonitor)> { #[rustfmt::skip] fn read(reader: &mut R, args: (&'a ES, &'b SP)) -> Result { @@ -6857,14 +6857,14 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP To continue, run a v0.1 release, send/route a payment over the channel or close it."); } } - Ok(Some((best_block.block_hash, monitor))) + Ok(Some((best_block, monitor))) } } #[cfg(test)] mod tests { use bitcoin::amount::Amount; - use bitcoin::hash_types::{BlockHash, Txid}; + use bitcoin::hash_types::Txid; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; use bitcoin::hex::FromHex; @@ -6964,7 +6964,7 @@ mod tests { nodes[1].chain_monitor.chain_monitor.transactions_confirmed(&new_header, &[(0, broadcast_tx)], conf_height); - let (_, pre_update_monitor) = <(BlockHash, ChannelMonitor<_>)>::read( + let (_, pre_update_monitor) = <(BestBlock, ChannelMonitor<_>)>::read( &mut io::Cursor::new(&get_monitor!(nodes[1], channel.2).encode()), (&nodes[1].keys_manager.backing, &nodes[1].keys_manager.backing)).unwrap(); diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index b4cc6a302ae..8ef796ecb2e 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -18,9 +18,10 @@ use bitcoin::network::Network; use bitcoin::script::{Script, ScriptBuf}; use bitcoin::secp256k1::PublicKey; -use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent}; +use 
crate::chain::channelmonitor::{ + ANTI_REORG_DELAY, ChannelMonitor, ChannelMonitorUpdate, MonitorEvent, +}; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::impl_writeable_tlv_based; use crate::ln::types::ChannelId; use crate::sign::ecdsa::EcdsaChannelSigner; use crate::sign::HTLCDescriptor; @@ -42,13 +43,20 @@ pub struct BestBlock { pub block_hash: BlockHash, /// The height at which the block was confirmed. pub height: u32, + /// Previous blocks immediately before [`Self::block_hash`], in reverse chronological order. + /// + /// These ensure we can find the fork point of a reorg if our block source no longer has the + /// previous best tip after a restart. + pub previous_blocks: [Option; ANTI_REORG_DELAY as usize * 2], } impl BestBlock { /// Constructs a `BestBlock` that represents the genesis block at height 0 of the given /// network. pub fn from_network(network: Network) -> Self { - BestBlock { block_hash: genesis_block(network).header.block_hash(), height: 0 } + let block_hash = genesis_block(network).header.block_hash(); + let previous_blocks = [None; ANTI_REORG_DELAY as usize * 2]; + BestBlock { block_hash, height: 0, previous_blocks } } /// Returns a `BestBlock` as identified by the given block hash and height. @@ -56,13 +64,75 @@ impl BestBlock { /// This is not exported to bindings users directly as the bindings auto-generate an /// equivalent `new`. pub fn new(block_hash: BlockHash, height: u32) -> Self { - BestBlock { block_hash, height } + let previous_blocks = [None; ANTI_REORG_DELAY as usize * 2]; + BestBlock { block_hash, height, previous_blocks } + } + + /// Advances to a new block at height [`Self::height`] + 1. 
+ pub fn advance(&mut self, new_hash: BlockHash) { + // Shift all block hashes to the right (making room for the old tip at index 0) + for i in (1..self.previous_blocks.len()).rev() { + self.previous_blocks[i] = self.previous_blocks[i - 1]; + } + + // The old tip becomes the new index 0 (tip-1) + self.previous_blocks[0] = Some(self.block_hash); + + // Update to the new tip + self.block_hash = new_hash; + self.height += 1; + } + + /// Returns the block hash at the given height, if available in our history. + pub fn get_hash_at_height(&self, height: u32) -> Option { + if height > self.height { + return None; + } + if height == self.height { + return Some(self.block_hash); + } + + // offset = 1 means we want tip-1, which is block_hashes[0] + // offset = 2 means we want tip-2, which is block_hashes[1], etc. + let offset = self.height.saturating_sub(height) as usize; + if offset >= 1 && offset <= self.previous_blocks.len() { + self.previous_blocks[offset - 1] + } else { + None + } + } + + /// Find the most recent common ancestor between two BestBlocks by searching their block hash + /// histories. + /// + /// Returns the common block hash and height, or None if no common block is found in the + /// available histories. 
+ pub fn find_common_ancestor(&self, other: &BestBlock) -> Option<(BlockHash, u32)> { + // First check if either tip matches + if self.block_hash == other.block_hash && self.height == other.height { + return Some((self.block_hash, self.height)); + } + + // Check all heights covered by self's history + let min_height = self.height.saturating_sub(self.previous_blocks.len() as u32); + for check_height in (min_height..=self.height).rev() { + if let Some(self_hash) = self.get_hash_at_height(check_height) { + if let Some(other_hash) = other.get_hash_at_height(check_height) { + if self_hash == other_hash { + return Some((self_hash, check_height)); + } + } + } + } + None } } impl_writeable_tlv_based!(BestBlock, { (0, block_hash, required), + (1, previous_blocks_read, (legacy, [Option; ANTI_REORG_DELAY as usize * 2], |us: &BestBlock| Some(us.previous_blocks))), (2, height, required), + (unused, previous_blocks, (static_value, previous_blocks_read.unwrap_or([None; ANTI_REORG_DELAY as usize * 2]))), }); /// The `Listen` trait is used to notify when blocks have been connected or disconnected from the diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 3d9beb82c07..9c35194751f 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -14,6 +14,7 @@ pub mod sync; use alloc::collections::BTreeMap; +use core::future::Future; use core::ops::Deref; use crate::chain::chaininterface::{ @@ -36,7 +37,7 @@ use crate::sign::{ ChannelDerivationParameters, HTLCDescriptor, SignerProvider, P2WPKH_WITNESS_WEIGHT, }; use crate::sync::Mutex; -use crate::util::async_poll::{AsyncResult, MaybeSend, MaybeSync}; +use crate::util::native_async::{MaybeSend, MaybeSync}; use crate::util::logger::Logger; use bitcoin::amount::Amount; @@ -394,13 +395,15 @@ pub trait CoinSelectionSource { fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], 
target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, - ) -> AsyncResult<'a, CoinSelection, ()>; + ) -> impl Future> + MaybeSend + 'a; /// Signs and provides the full witness for all inputs within the transaction known to the /// trait (i.e., any provided via [`CoinSelectionSource::select_confirmed_utxos`]). /// /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the /// unsigned transaction and then sign it with your wallet. - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()>; + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a; } /// An alternative to [`CoinSelectionSource`] that can be implemented and used along [`Wallet`] to @@ -412,17 +415,23 @@ pub trait CoinSelectionSource { // Note that updates to documentation on this trait should be copied to the synchronous version. pub trait WalletSource { /// Returns all UTXOs, with at least 1 confirmation each, that are available to spend. - fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec, ()>; + fn list_confirmed_utxos<'a>( + &'a self, + ) -> impl Future, ()>> + MaybeSend + 'a; /// Returns a script to use for change above dust resulting from a successful coin selection /// attempt. - fn get_change_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()>; + fn get_change_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a; /// Signs and provides the full [`TxIn::script_sig`] and [`TxIn::witness`] for all inputs within /// the transaction known to the wallet (i.e., any provided via /// [`WalletSource::list_confirmed_utxos`]). /// /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the /// unsigned transaction and then sign it with your wallet. 
- fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()>; + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a; } /// A wrapper over [`WalletSource`] that implements [`CoinSelectionSource`] by preferring UTXOs @@ -617,8 +626,8 @@ where fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, - ) -> AsyncResult<'a, CoinSelection, ()> { - Box::pin(async move { + ) -> impl Future> + MaybeSend + 'a { + async move { let utxos = self.source.list_confirmed_utxos().await?; // TODO: Use fee estimation utils when we upgrade to bitcoin v0.30.0. let total_output_size: u64 = must_pay_to @@ -665,10 +674,12 @@ where } } Err(()) - }) + } } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a { self.source.sign_psbt(psbt) } } diff --git a/lightning/src/events/bump_transaction/sync.rs b/lightning/src/events/bump_transaction/sync.rs index 653710a3358..03ffa479097 100644 --- a/lightning/src/events/bump_transaction/sync.rs +++ b/lightning/src/events/bump_transaction/sync.rs @@ -11,14 +11,16 @@ use core::future::Future; use core::ops::Deref; +use core::pin::pin; use core::task; use crate::chain::chaininterface::BroadcasterInterface; use crate::chain::ClaimId; use crate::prelude::*; use crate::sign::SignerProvider; -use crate::util::async_poll::{dummy_waker, AsyncResult, MaybeSend, MaybeSync}; +use crate::util::async_poll::dummy_waker; use crate::util::logger::Logger; +use crate::util::native_async::{MaybeSend, MaybeSync}; use bitcoin::{Psbt, ScriptBuf, Transaction, TxOut}; @@ -71,19 +73,25 @@ impl WalletSource for WalletSourceSyncWrapper where T::Target: WalletSourceSync, { - fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec, ()> { + fn list_confirmed_utxos<'a>( + &'a self, + ) -> impl Future, ()>> + MaybeSend + 'a { 
let utxos = self.0.list_confirmed_utxos(); - Box::pin(async move { utxos }) + async move { utxos } } - fn get_change_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()> { + fn get_change_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a { let script = self.0.get_change_script(); - Box::pin(async move { script }) + async move { script } } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a { let signed_psbt = self.0.sign_psbt(psbt); - Box::pin(async move { signed_psbt }) + async move { signed_psbt } } } @@ -122,7 +130,7 @@ where &self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &[TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, ) -> Result { - let mut fut = self.wallet.select_confirmed_utxos( + let fut = self.wallet.select_confirmed_utxos( claim_id, must_spend, must_pay_to, @@ -131,7 +139,7 @@ where ); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - match fut.as_mut().poll(&mut ctx) { + match pin!(fut).poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { unreachable!( @@ -142,10 +150,10 @@ where } fn sign_psbt(&self, psbt: Psbt) -> Result { - let mut fut = self.wallet.sign_psbt(psbt); + let fut = self.wallet.sign_psbt(psbt); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - match fut.as_mut().poll(&mut ctx) { + match pin!(fut).poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { unreachable!("Wallet::sign_psbt should not be pending in a sync context"); @@ -233,7 +241,7 @@ where fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, - ) -> AsyncResult<'a, CoinSelection, ()> { + ) -> impl Future> + MaybeSend + 'a { let coins = self.0.select_confirmed_utxos( claim_id, must_spend, @@ -241,12 
+249,14 @@ where target_feerate_sat_per_1000_weight, max_tx_weight, ); - Box::pin(async move { coins }) + async move { coins } } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a { let psbt = self.0.sign_psbt(psbt); - Box::pin(async move { psbt }) + async move { psbt } } } @@ -289,7 +299,7 @@ where /// Handles all variants of [`BumpTransactionEvent`]. pub fn handle_event(&self, event: &BumpTransactionEvent) { - let mut fut = Box::pin(self.bump_transaction_event_handler.handle_event(event)); + let mut fut = pin!(self.bump_transaction_event_handler.handle_event(event)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); match fut.as_mut().poll(&mut ctx) { diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 1a9af4f2071..87eec7c48aa 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -16,7 +16,7 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::chainmonitor::ChainMonitor; use crate::chain::channelmonitor::{ChannelMonitor, MonitorEvent, ANTI_REORG_DELAY}; use crate::chain::transaction::OutPoint; -use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; +use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Listen, Watch}; use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentPurpose}; use crate::ln::channel::AnnouncementSigsState; use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; @@ -93,7 +93,7 @@ fn test_monitor_and_persister_update_fail() { let chain_mon = { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan.2).unwrap(); - let (_, new_monitor) = <(BlockHash, ChannelMonitor)>::read( + let (_, new_monitor) = <(BestBlock, ChannelMonitor)>::read( &mut &monitor.encode()[..], 
(nodes[0].keys_manager, nodes[0].keys_manager), ) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 644920557d2..03ce7affe20 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1941,7 +1941,6 @@ where /// detailed in the [`ChannelManagerReadArgs`] documentation. /// /// ``` -/// use bitcoin::BlockHash; /// use bitcoin::network::Network; /// use lightning::chain::BestBlock; /// # use lightning::chain::channelmonitor::ChannelMonitor; @@ -1990,8 +1989,8 @@ where /// entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, /// router, message_router, logger, config, channel_monitors.iter().collect(), /// ); -/// let (block_hash, channel_manager) = -/// <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _, _>)>::read(&mut reader, args)?; +/// let (best_block, channel_manager) = +/// <(BestBlock, ChannelManager<_, _, _, _, _, _, _, _, _>)>::read(&mut reader, args)?; /// /// // Update the ChannelManager and ChannelMonitors with the latest chain data /// // ... @@ -2557,7 +2556,7 @@ where /// [`read`], those channels will be force-closed based on the `ChannelMonitor` state and no funds /// will be lost (modulo on-chain transaction fees). /// -/// Note that the deserializer is only implemented for `(`[`BlockHash`]`, `[`ChannelManager`]`)`, which +/// Note that the deserializer is only implemented for `(`[`BestBlock`]`, `[`ChannelManager`]`)`, which /// tells you the last block hash which was connected. You should get the best block tip before using the manager. /// See [`chain::Listen`] and [`chain::Confirm`] for more details. 
/// @@ -2624,7 +2623,6 @@ where /// [`peer_disconnected`]: msgs::BaseMessageHandler::peer_disconnected /// [`funding_created`]: msgs::FundingCreated /// [`funding_transaction_generated`]: Self::funding_transaction_generated -/// [`BlockHash`]: bitcoin::hash_types::BlockHash /// [`update_channel`]: chain::Watch::update_channel /// [`ChannelUpdate`]: msgs::ChannelUpdate /// [`read`]: ReadableArgs::read @@ -16695,7 +16693,7 @@ impl< MR: Deref, L: Deref + Clone, > ReadableArgs> - for (BlockHash, Arc>) + for (BestBlock, Arc>) where M::Target: chain::Watch<::EcdsaSigner>, T::Target: BroadcasterInterface, @@ -16710,9 +16708,9 @@ where fn read( reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, ) -> Result { - let (blockhash, chan_manager) = - <(BlockHash, ChannelManager)>::read(reader, args)?; - Ok((blockhash, Arc::new(chan_manager))) + let (best_block, chan_manager) = + <(BestBlock, ChannelManager)>::read(reader, args)?; + Ok((best_block, Arc::new(chan_manager))) } } @@ -16728,7 +16726,7 @@ impl< MR: Deref, L: Deref + Clone, > ReadableArgs> - for (BlockHash, ChannelManager) + for (BestBlock, ChannelManager) where M::Target: chain::Watch<::EcdsaSigner>, T::Target: BroadcasterInterface, @@ -18402,7 +18400,7 @@ where //TODO: Broadcast channel update for closed channels, but only after we've made a //connection or two. 
- Ok((best_block_hash.clone(), channel_manager)) + Ok((best_block, channel_manager)) } } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 271d458bcc8..57e8b5aa1cd 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -847,7 +847,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { let mon = self.chain_monitor.chain_monitor.get_monitor(channel_id).unwrap(); mon.write(&mut w).unwrap(); let (_, deserialized_monitor) = - <(BlockHash, ChannelMonitor)>::read( + <(BestBlock, ChannelMonitor)>::read( &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager), ) @@ -875,7 +875,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { let mut w = test_utils::TestVecWriter(Vec::new()); self.node.write(&mut w).unwrap(); <( - BlockHash, + BestBlock, ChannelManager< &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, @@ -1332,7 +1332,7 @@ pub fn _reload_node<'a, 'b, 'c>( let mut monitors_read = Vec::with_capacity(monitors_encoded.len()); for encoded in monitors_encoded { let mut monitor_read = &encoded[..]; - let (_, monitor) = <(BlockHash, ChannelMonitor)>::read( + let (_, monitor) = <(BestBlock, ChannelMonitor)>::read( &mut monitor_read, (node.keys_manager, node.keys_manager), ) @@ -1347,7 +1347,7 @@ pub fn _reload_node<'a, 'b, 'c>( for monitor in monitors_read.iter() { assert!(channel_monitors.insert(monitor.channel_id(), monitor).is_none()); } - <(BlockHash, TestChannelManager<'b, 'c>)>::read( + <(BestBlock, TestChannelManager<'b, 'c>)>::read( &mut node_read, ChannelManagerReadArgs { config, diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index c161a9664c0..dd57fd5ecd4 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -19,6 +19,7 @@ use crate::chain::channelmonitor::{ LATENCY_GRACE_PERIOD_BLOCKS, }; use crate::chain::transaction::OutPoint; +use crate::chain::BestBlock; use 
crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch}; use crate::events::{ ClosureReason, Event, HTLCHandlingFailureType, PathFailure, PaymentFailureReason, @@ -7239,7 +7240,7 @@ pub fn test_update_err_monitor_lockdown() { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); let new_monitor = - <(BlockHash, channelmonitor::ChannelMonitor)>::read( + <(BestBlock, channelmonitor::ChannelMonitor)>::read( &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager), ) @@ -7345,7 +7346,7 @@ pub fn test_concurrent_monitor_claim() { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); let new_monitor = - <(BlockHash, channelmonitor::ChannelMonitor)>::read( + <(BestBlock, channelmonitor::ChannelMonitor)>::read( &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager), ) @@ -7395,7 +7396,7 @@ pub fn test_concurrent_monitor_claim() { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); let new_monitor = - <(BlockHash, channelmonitor::ChannelMonitor)>::read( + <(BestBlock, channelmonitor::ChannelMonitor)>::read( &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager), ) diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 8c9e552fa04..9fb5ce8394c 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -11,7 +11,7 @@ //! Functional tests which test for correct behavior across node restarts. 
-use crate::chain::{ChannelMonitorUpdateStatus, Watch}; +use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Watch}; use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateStep}; use crate::routing::router::{PaymentParameters, RouteParameters}; @@ -29,7 +29,6 @@ use crate::util::ser::{Writeable, ReadableArgs}; use crate::util::config::UserConfig; use bitcoin::hashes::Hash; -use bitcoin::hash_types::BlockHash; use types::payment::{PaymentHash, PaymentPreimage}; use crate::prelude::*; @@ -410,7 +409,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let mut node_0_stale_monitors = Vec::new(); for serialized in node_0_stale_monitors_serialized.iter() { let mut read = &serialized[..]; - let (_, monitor) = <(BlockHash, ChannelMonitor)>::read(&mut read, (keys_manager, keys_manager)).unwrap(); + let (_, monitor) = <(BestBlock, ChannelMonitor)>::read(&mut read, (keys_manager, keys_manager)).unwrap(); assert!(read.is_empty()); node_0_stale_monitors.push(monitor); } @@ -418,14 +417,14 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let mut node_0_monitors = Vec::new(); for serialized in node_0_monitors_serialized.iter() { let mut read = &serialized[..]; - let (_, monitor) = <(BlockHash, ChannelMonitor)>::read(&mut read, (keys_manager, keys_manager)).unwrap(); + let (_, monitor) = <(BestBlock, ChannelMonitor)>::read(&mut read, (keys_manager, keys_manager)).unwrap(); assert!(read.is_empty()); node_0_monitors.push(monitor); } let mut nodes_0_read = &nodes_0_serialized[..]; if let Err(msgs::DecodeError::DangerousValue) = - <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestMessageRouter, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs 
{ + <(BestBlock, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestMessageRouter, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs { config: UserConfig::default(), entropy_source: keys_manager, node_signer: keys_manager, @@ -443,7 +442,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let mut nodes_0_read = &nodes_0_serialized[..]; let (_, nodes_0_deserialized_tmp) = - <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestMessageRouter, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs { + <(BestBlock, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestMessageRouter, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs { config: UserConfig::default(), entropy_source: keys_manager, node_signer: keys_manager, diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 1d771d22783..d562ff13406 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -58,7 +58,7 @@ use crate::ln::script::ShutdownScript; use crate::offers::invoice::UnsignedBolt12Invoice; use crate::types::features::ChannelTypeFeatures; use crate::types::payment::PaymentPreimage; -use crate::util::async_poll::AsyncResult; +use crate::util::native_async::MaybeSend; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::transaction_utils; @@ -68,7 +68,9 @@ use 
crate::sign::ecdsa::EcdsaChannelSigner; #[cfg(taproot)] use crate::sign::taproot::TaprootChannelSigner; use crate::util::atomic_counter::AtomicCounter; + use core::convert::TryInto; +use core::future::Future; use core::ops::Deref; use core::sync::atomic::{AtomicUsize, Ordering}; #[cfg(taproot)] @@ -1066,7 +1068,9 @@ pub trait ChangeDestinationSource { /// /// This method should return a different value each time it is called, to avoid linking /// on-chain funds controlled to the same user. - fn get_change_destination_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()>; + fn get_change_destination_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a; } /// A synchronous helper trait that describes an on-chain wallet capable of returning a (change) destination script. @@ -1101,9 +1105,11 @@ impl ChangeDestinationSource for ChangeDestinationSourceSyncWrapper where T::Target: ChangeDestinationSourceSync, { - fn get_change_destination_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()> { + fn get_change_destination_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a { let script = self.0.get_change_destination_script(); - Box::pin(async move { script }) + async move { script } } } diff --git a/lightning/src/sync/nostd_sync.rs b/lightning/src/sync/nostd_sync.rs index 12070741918..18055d1ebe4 100644 --- a/lightning/src/sync/nostd_sync.rs +++ b/lightning/src/sync/nostd_sync.rs @@ -61,7 +61,7 @@ impl<'a, T: 'a> LockTestExt<'a> for Mutex { } type ExclLock = MutexGuard<'a, T>; #[inline] - fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard { + fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<'a, T> { self.lock().unwrap() } } @@ -132,7 +132,7 @@ impl<'a, T: 'a> LockTestExt<'a> for RwLock { } type ExclLock = RwLockWriteGuard<'a, T>; #[inline] - fn unsafe_well_ordered_double_lock_self(&'a self) -> RwLockWriteGuard { + fn unsafe_well_ordered_double_lock_self(&'a self) -> RwLockWriteGuard<'a, T> { self.write().unwrap() } } diff 
--git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs index eefa40d1055..23ca1aad603 100644 --- a/lightning/src/util/async_poll.rs +++ b/lightning/src/util/async_poll.rs @@ -9,33 +9,106 @@ //! Some utilities to make working with the standard library's [`Future`]s easier -use alloc::boxed::Box; use alloc::vec::Vec; use core::future::Future; use core::marker::Unpin; use core::pin::Pin; use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; -pub(crate) enum ResultFuture>, E: Unpin> { +pub(crate) enum ResultFuture + Unpin, O> { Pending(F), - Ready(Result<(), E>), + Ready(O), } -pub(crate) struct MultiResultFuturePoller> + Unpin, E: Unpin> { - futures_state: Vec>, +pub(crate) struct TwoFutureJoiner< + AO, + BO, + AF: Future + Unpin, + BF: Future + Unpin, +> { + a: Option>, + b: Option>, } -impl> + Unpin, E: Unpin> MultiResultFuturePoller { - pub fn new(futures_state: Vec>) -> Self { +impl + Unpin, BF: Future + Unpin> + TwoFutureJoiner +{ + pub fn new(future_a: AF, future_b: BF) -> Self { + Self { a: Some(ResultFuture::Pending(future_a)), b: Some(ResultFuture::Pending(future_b)) } + } +} + +impl + Unpin, BF: Future + Unpin> Future + for TwoFutureJoiner +{ + type Output = (AO, BO); + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<(AO, BO)> { + let mut have_pending_futures = false; + // SAFETY: While we are pinned, we can't get direct access to our internal state because we + // aren't `Unpin`. However, we don't actually need the `Pin` - we only use it below on the + // `Future` in the `ResultFuture::Pending` case, and the `Future` is bound by `Unpin`. + // Thus, the `Pin` is not actually used, and its safe to bypass it and access the inner + // reference directly. + let state = unsafe { &mut self.get_unchecked_mut() }; + macro_rules! 
poll_future { + ($future: ident) => { + match state.$future { + Some(ResultFuture::Pending(ref mut fut)) => match Pin::new(fut).poll(cx) { + Poll::Ready(res) => { + state.$future = Some(ResultFuture::Ready(res)); + }, + Poll::Pending => { + have_pending_futures = true; + }, + }, + Some(ResultFuture::Ready(_)) => {}, + None => { + debug_assert!(false, "Future polled after Ready"); + return Poll::Pending; + }, + } + }; + } + poll_future!(a); + poll_future!(b); + + if have_pending_futures { + Poll::Pending + } else { + Poll::Ready(( + match state.a.take() { + Some(ResultFuture::Ready(a)) => a, + _ => unreachable!(), + }, + match state.b.take() { + Some(ResultFuture::Ready(b)) => b, + _ => unreachable!(), + }, + )) + } + } +} + +pub(crate) struct MultiResultFuturePoller + Unpin, O> { + futures_state: Vec>, +} + +impl + Unpin, O> MultiResultFuturePoller { + pub fn new(futures_state: Vec>) -> Self { Self { futures_state } } } -impl> + Unpin, E: Unpin> Future for MultiResultFuturePoller { - type Output = Vec>; - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>> { +impl + Unpin, O> Future for MultiResultFuturePoller { + type Output = Vec; + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut have_pending_futures = false; - let futures_state = &mut self.get_mut().futures_state; + // SAFETY: While we are pinned, we can't get direct access to `futures_state` because we + // aren't `Unpin`. However, we don't actually need the `Pin` - we only use it below on the + // `Future` in the `ResultFuture::Pending` case, and the `Future` is bound by `Unpin`. + // Thus, the `Pin` is not actually used, and its safe to bypass it and access the inner + // reference directly. 
+ let futures_state = unsafe { &mut self.get_unchecked_mut().futures_state }; for state in futures_state.iter_mut() { match state { ResultFuture::Pending(ref mut fut) => match Pin::new(fut).poll(cx) { @@ -91,42 +164,3 @@ const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new( pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } } - -#[cfg(feature = "std")] -/// A type alias for a future that returns a result of type `T` or error `E`. -/// -/// This is not exported to bindings users as async is only supported in Rust. -pub type AsyncResult<'a, T, E> = Pin> + 'a + Send>>; -#[cfg(not(feature = "std"))] -/// A type alias for a future that returns a result of type `T` or error `E`. -/// -/// This is not exported to bindings users as async is only supported in Rust. -pub type AsyncResult<'a, T, E> = Pin> + 'a>>; - -/// Marker trait to optionally implement `Sync` under std. -/// -/// This is not exported to bindings users as async is only supported in Rust. -#[cfg(feature = "std")] -pub use core::marker::Sync as MaybeSync; - -#[cfg(not(feature = "std"))] -/// Marker trait to optionally implement `Sync` under std. -/// -/// This is not exported to bindings users as async is only supported in Rust. -pub trait MaybeSync {} -#[cfg(not(feature = "std"))] -impl MaybeSync for T where T: ?Sized {} - -/// Marker trait to optionally implement `Send` under std. -/// -/// This is not exported to bindings users as async is only supported in Rust. -#[cfg(feature = "std")] -pub use core::marker::Send as MaybeSend; - -#[cfg(not(feature = "std"))] -/// Marker trait to optionally implement `Send` under std. -/// -/// This is not exported to bindings users as async is only supported in Rust. 
-pub trait MaybeSend {} -#[cfg(not(feature = "std"))] -impl MaybeSend for T where T: ?Sized {} diff --git a/lightning/src/util/mod.rs b/lightning/src/util/mod.rs index dcbea904b51..51e608d185f 100644 --- a/lightning/src/util/mod.rs +++ b/lightning/src/util/mod.rs @@ -20,7 +20,7 @@ pub mod mut_global; pub mod anchor_channel_reserves; -pub mod async_poll; +pub(crate) mod async_poll; #[cfg(fuzzing)] pub mod base32; #[cfg(not(fuzzing))] diff --git a/lightning/src/util/native_async.rs b/lightning/src/util/native_async.rs index 886146e976d..34bdb056329 100644 --- a/lightning/src/util/native_async.rs +++ b/lightning/src/util/native_async.rs @@ -8,23 +8,45 @@ //! environment. #[cfg(all(test, feature = "std"))] -use crate::sync::Mutex; -use crate::util::async_poll::{MaybeSend, MaybeSync}; +use crate::sync::{Arc, Mutex}; + +#[cfg(test)] +use alloc::boxed::Box; +#[cfg(all(test, not(feature = "std")))] +use alloc::rc::Rc; #[cfg(all(test, not(feature = "std")))] use core::cell::RefCell; +#[cfg(test)] +use core::convert::Infallible; use core::future::Future; #[cfg(test)] use core::pin::Pin; +#[cfg(test)] +use core::task::{Context, Poll}; -/// A generic trait which is able to spawn futures in the background. +/// A generic trait which is able to spawn futures to be polled in the background. +/// +/// When the spawned future completes, the returned [`Self::SpawnedFutureResult`] should resolve +/// with the output of the spawned future. +/// +/// Spawned futures must be polled independently in the background even if the returned +/// [`Self::SpawnedFutureResult`] is dropped without being polled. This matches the semantics of +/// `tokio::spawn`. /// /// This is not exported to bindings users as async is only supported in Rust. pub trait FutureSpawner: MaybeSend + MaybeSync + 'static { + /// The error type of [`Self::SpawnedFutureResult`]. This can be used to indicate that the + /// spawned future was cancelled or panicked. 
+ type E; + /// The result of [`Self::spawn`], a future which completes when the spawned future completes. + type SpawnedFutureResult: Future> + Unpin; /// Spawns the given future as a background task. /// /// This method MUST NOT block on the given future immediately. - fn spawn + MaybeSend + 'static>(&self, future: T); + fn spawn + MaybeSend + 'static>( + &self, future: T, + ) -> Self::SpawnedFutureResult; } #[cfg(test)] @@ -32,6 +54,34 @@ trait MaybeSendableFuture: Future + MaybeSend + 'static {} #[cfg(test)] impl + MaybeSend + 'static> MaybeSendableFuture for F {} +/// Marker trait to optionally implement `Sync` under std. +/// +/// This is not exported to bindings users as async is only supported in Rust. +#[cfg(feature = "std")] +pub use core::marker::Sync as MaybeSync; + +#[cfg(not(feature = "std"))] +/// Marker trait to optionally implement `Sync` under std. +/// +/// This is not exported to bindings users as async is only supported in Rust. +pub trait MaybeSync {} +#[cfg(not(feature = "std"))] +impl MaybeSync for T where T: ?Sized {} + +/// Marker trait to optionally implement `Send` under std. +/// +/// This is not exported to bindings users as async is only supported in Rust. +#[cfg(feature = "std")] +pub use core::marker::Send as MaybeSend; + +#[cfg(not(feature = "std"))] +/// Marker trait to optionally implement `Send` under std. +/// +/// This is not exported to bindings users as async is only supported in Rust. +pub trait MaybeSend {} +#[cfg(not(feature = "std"))] +impl MaybeSend for T where T: ?Sized {} + /// A simple [`FutureSpawner`] which holds [`Future`]s until they are manually polled via /// [`Self::poll_futures`]. 
#[cfg(all(test, feature = "std"))] @@ -39,6 +89,69 @@ pub(crate) struct FutureQueue(Mutex>>>); #[cfg(all(test, not(feature = "std")))] pub(crate) struct FutureQueue(RefCell>>>); +#[cfg(all(test, feature = "std"))] +pub struct FutureQueueCompletion(Arc>>); +#[cfg(all(test, not(feature = "std")))] +pub struct FutureQueueCompletion(Rc>>); + +#[cfg(all(test, feature = "std"))] +impl FutureQueueCompletion { + fn new() -> Self { + Self(Arc::new(Mutex::new(None))) + } + + fn complete(&self, o: O) { + *self.0.lock().unwrap() = Some(o); + } +} + +#[cfg(all(test, feature = "std"))] +impl Clone for FutureQueueCompletion { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +#[cfg(all(test, not(feature = "std")))] +impl FutureQueueCompletion { + fn new() -> Self { + Self(Rc::new(RefCell::new(None))) + } + + fn complete(&self, o: O) { + *self.0.borrow_mut() = Some(o); + } +} + +#[cfg(all(test, not(feature = "std")))] +impl Clone for FutureQueueCompletion { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +#[cfg(all(test, feature = "std"))] +impl Future for FutureQueueCompletion { + type Output = Result; + fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + match Pin::into_inner(self).0.lock().unwrap().take() { + None => Poll::Pending, + Some(o) => Poll::Ready(Ok(o)), + } + } +} + +#[cfg(all(test, not(feature = "std")))] +impl Future for FutureQueueCompletion { + type Output = Result; + fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + match Pin::into_inner(self).0.borrow_mut().take() { + None => Poll::Pending, + Some(o) => Poll::Ready(Ok(o)), + } + } +} + #[cfg(test)] impl FutureQueue { pub(crate) fn new() -> Self { @@ -74,7 +187,6 @@ impl FutureQueue { futures = self.0.borrow_mut(); } futures.retain_mut(|fut| { - use core::task::{Context, Poll}; let waker = crate::util::async_poll::dummy_waker(); match fut.as_mut().poll(&mut Context::from_waker(&waker)) { Poll::Ready(()) => false, @@ -86,7 +198,16 @@ impl FutureQueue { #[cfg(test)] 
impl FutureSpawner for FutureQueue { - fn spawn + MaybeSend + 'static>(&self, future: T) { + type E = Infallible; + type SpawnedFutureResult = FutureQueueCompletion; + fn spawn + MaybeSend + 'static>( + &self, f: F, + ) -> FutureQueueCompletion { + let completion = FutureQueueCompletion::new(); + let compl_ref = completion.clone(); + let future = async move { + compl_ref.complete(f.await); + }; #[cfg(feature = "std")] { self.0.lock().unwrap().push(Box::pin(future)); @@ -95,6 +216,7 @@ impl FutureSpawner for FutureQueue { { self.0.borrow_mut().push(Box::pin(future)); } + completion } } @@ -102,7 +224,16 @@ impl FutureSpawner for FutureQueue { impl + MaybeSend + MaybeSync + 'static> FutureSpawner for D { - fn spawn + MaybeSend + 'static>(&self, future: T) { + type E = Infallible; + type SpawnedFutureResult = FutureQueueCompletion; + fn spawn + MaybeSend + 'static>( + &self, f: F, + ) -> FutureQueueCompletion { + let completion = FutureQueueCompletion::new(); + let compl_ref = completion.clone(); + let future = async move { + compl_ref.complete(f.await); + }; #[cfg(feature = "std")] { self.0.lock().unwrap().push(Box::pin(future)); @@ -111,5 +242,6 @@ impl + MaybeSend + MaybeSync + 'static { self.0.borrow_mut().push(Box::pin(future)); } + completion } } diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index d00e29e686a..807cf2bf9fd 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -14,12 +14,13 @@ use alloc::sync::Arc; use bitcoin::hashes::hex::FromHex; -use bitcoin::{BlockHash, Txid}; +use bitcoin::Txid; +use core::convert::Infallible; use core::future::Future; use core::mem; use core::ops::Deref; -use core::pin::Pin; +use core::pin::{pin, Pin}; use core::str::FromStr; use core::task; @@ -31,12 +32,15 @@ use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; use crate::chain::chainmonitor::Persist; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate}; use 
crate::chain::transaction::OutPoint; +use crate::chain::BestBlock; use crate::ln::types::ChannelId; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider}; use crate::sync::Mutex; -use crate::util::async_poll::{dummy_waker, AsyncResult, MaybeSend, MaybeSync}; +use crate::util::async_poll::{ + dummy_waker, MultiResultFuturePoller, ResultFuture, TwoFutureJoiner, +}; use crate::util::logger::Logger; -use crate::util::native_async::FutureSpawner; +use crate::util::native_async::{FutureSpawner, MaybeSend, MaybeSync}; use crate::util::ser::{Readable, ReadableArgs, Writeable}; use crate::util::wakers::Notifier; @@ -216,34 +220,34 @@ where { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.0.read(primary_namespace, secondary_namespace, key); - Box::pin(async move { res }) + async move { res } } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let res = self.0.write(primary_namespace, secondary_namespace, key, buf); - Box::pin(async move { res }) + async move { res } } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let res = self.0.remove(primary_namespace, secondary_namespace, key, lazy); - Box::pin(async move { res }) + async move { res } } fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.0.list(primary_namespace, secondary_namespace); - Box::pin(async move { res }) + async move { res } } } @@ -283,16 +287,18 @@ pub trait KVStore { /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound fn read( &self, 
primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> AsyncResult<'static, Vec, io::Error>; + ) -> impl Future, io::Error>> + 'static + MaybeSend; /// Persists the given data under the given `key`. /// - /// The order of multiple writes to the same key needs to be retained while persisting - /// asynchronously. In other words, if two writes to the same key occur, the state (as seen by - /// [`Self::read`]) must either see the first write then the second, or only ever the second, - /// no matter when the futures complete (and must always contain the second write once the - /// second future completes). The state should never contain the first write after the second - /// write's future completes, nor should it contain the second write, then contain the first - /// write at any point thereafter (even if the second write's future hasn't yet completed). + /// Note that this is *not* an `async fn`. Rather, the order of multiple writes to the same key + /// (as defined by the order of the synchronous function calls) needs to be retained while + /// persisting asynchronously. In other words, if two writes to the same key occur, the state + /// (as seen by [`Self::read`]) must either see the first write then the second, or only ever + /// the second, no matter when the futures complete (and must always contain the second write + /// once the second future completes). The state should never contain the first write after the + /// second write's future completes, nor should it contain the second write, then contain the + /// first write at any point thereafter (even if the second write's future hasn't yet + /// completed). /// /// One way to ensure this requirement is met is by assigning a version number to each write /// before returning the future, and then during asynchronous execution, ensuring that the @@ -303,7 +309,7 @@ pub trait KVStore { /// Will create the given `primary_namespace` and `secondary_namespace` if not already present in the store. 
fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> AsyncResult<'static, (), io::Error>; + ) -> impl Future> + 'static + MaybeSend; /// Removes any data that had previously been persisted under the given `key`. /// /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily @@ -311,6 +317,10 @@ pub trait KVStore { /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to /// [`KVStoreSync::list`] might include the removed key until the changes are actually persisted. /// + /// Note that similar to [`Self::write`] this is *not* an `async fn`, but rather a sync fn + /// which defines the order of writes to a given key, but which may complete its operation + /// asynchronously. + /// /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could /// potentially get lost on crash after the method returns. Therefore, this flag should only be @@ -321,12 +331,13 @@ pub trait KVStore { /// to the same key which occur before a removal completes must cancel/overwrite the pending /// removal. /// + /// /// Returns successfully if no data will be stored for the given `primary_namespace`, /// `secondary_namespace`, and `key`, independently of whether it was present before its /// invokation or not. fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> AsyncResult<'static, (), io::Error>; + ) -> impl Future> + 'static + MaybeSend; /// Returns a list of keys that are stored under the given `secondary_namespace` in /// `primary_namespace`. /// @@ -334,7 +345,7 @@ pub trait KVStore { /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown. 
fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> AsyncResult<'static, Vec, io::Error>; + ) -> impl Future, io::Error>> + 'static + MaybeSend; } /// Provides additional interface methods that are required for [`KVStore`]-to-[`KVStore`] @@ -437,7 +448,7 @@ impl Persist( kv_store: K, entropy_source: ES, signer_provider: SP, -) -> Result::EcdsaSigner>)>, io::Error> +) -> Result::EcdsaSigner>)>, io::Error> where K::Target: KVStoreSync, ES::Target: EntropySource + Sized, @@ -449,7 +460,7 @@ where CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, )? { - match ::EcdsaSigner>)>>::read( + match ::EcdsaSigner>)>>::read( &mut io::Cursor::new(kv_store.read( CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, @@ -457,7 +468,7 @@ where )?), (&*entropy_source, &*signer_provider), ) { - Ok(Some((block_hash, channel_monitor))) => { + Ok(Some((best_block, channel_monitor))) => { let monitor_name = MonitorName::from_str(&stored_key)?; if channel_monitor.persistence_key() != monitor_name { return Err(io::Error::new( @@ -466,7 +477,7 @@ where )); } - res.push((block_hash, channel_monitor)); + res.push((best_block, channel_monitor)); }, Ok(None) => {}, Err(_) => { @@ -482,7 +493,11 @@ where struct PanicingSpawner; impl FutureSpawner for PanicingSpawner { - fn spawn + MaybeSend + 'static>(&self, _: T) { + type E = Infallible; + type SpawnedFutureResult = Box> + Unpin>; + fn spawn + MaybeSend + 'static>( + &self, _: T, + ) -> Self::SpawnedFutureResult { unreachable!(); } } @@ -490,8 +505,7 @@ impl FutureSpawner for PanicingSpawner { fn poll_sync_future(future: F) -> F::Output { let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - // TODO A future MSRV bump to 1.68 should allow for the pin macro - match Pin::new(&mut Box::pin(future)).poll(&mut ctx) { + match pin!(future).poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending 
=> { // In a sync context, we can't wait for the future to complete. @@ -563,15 +577,6 @@ fn poll_sync_future(future: F) -> F::Output { /// list channel monitors themselves and load channels individually using /// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`]. /// -/// ## EXTREMELY IMPORTANT -/// -/// It is extremely important that your [`KVStoreSync::read`] implementation uses the -/// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in -/// that circumstance (not when there is really a permissions error, for example). This is because -/// neither channel monitor reading function lists updates. Instead, either reads the monitor, and -/// using its stored `update_id`, synthesizes update storage keys, and tries them in sequence until -/// one is not found. All _other_ errors will be bubbled up in the function's [`Result`]. -/// /// # Pruning stale channel updates /// /// Stale updates are pruned when the consolidation threshold is reached according to `maximum_pending_updates`. @@ -645,14 +650,10 @@ where } /// Reads all stored channel monitors, along with any stored updates for them. - /// - /// It is extremely important that your [`KVStoreSync::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. pub fn read_all_channel_monitors_with_updates( &self, ) -> Result< - Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, + Vec<(BestBlock, ChannelMonitor<::EcdsaSigner>)>, io::Error, > { poll_sync_future(self.0.read_all_channel_monitors_with_updates()) @@ -660,10 +661,6 @@ where /// Read a single channel monitor, along with any stored updates for it. /// - /// It is extremely important that your [`KVStoreSync::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. 
- /// /// For `monitor_key`, channel storage keys can be the channel's funding [`OutPoint`], with an /// underscore `_` between txid and index for v1 channels. For example, given: /// @@ -679,7 +676,7 @@ where /// function to accomplish this. Take care to limit the number of parallel readers. pub fn read_channel_monitor_with_updates( &self, monitor_key: &str, - ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> + ) -> Result<(BestBlock, ChannelMonitor<::EcdsaSigner>), io::Error> { poll_sync_future(self.0.read_channel_monitor_with_updates(monitor_key)) } @@ -857,34 +854,81 @@ where /// Reads all stored channel monitors, along with any stored updates for them. /// - /// It is extremely important that your [`KVStore::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. + /// While the reads themselves are performend in parallel, deserializing the + /// [`ChannelMonitor`]s is not. For large [`ChannelMonitor`]s actively used for forwarding, + /// this may substantially limit the parallelism of this method. 
pub async fn read_all_channel_monitors_with_updates( &self, ) -> Result< - Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, + Vec<(BestBlock, ChannelMonitor<::EcdsaSigner>)>, io::Error, > { let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; let monitor_list = self.0.kv_store.list(primary, secondary).await?; - let mut res = Vec::with_capacity(monitor_list.len()); + let mut futures = Vec::with_capacity(monitor_list.len()); for monitor_key in monitor_list { - let result = - self.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await?; - if let Some(read_res) = result { + futures.push(ResultFuture::Pending(Box::pin(async move { + self.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await + }))); + } + let future_results = MultiResultFuturePoller::new(futures).await; + let mut res = Vec::with_capacity(future_results.len()); + for result in future_results { + if let Some(read_res) = result? { res.push(read_res); } } Ok(res) } - /// Read a single channel monitor, along with any stored updates for it. + /// Reads all stored channel monitors, along with any stored updates for them, in parallel. /// - /// It is extremely important that your [`KVStoreSync::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. + /// Because deserializing large [`ChannelMonitor`]s from forwarding nodes is often CPU-bound, + /// this version of [`Self::read_all_channel_monitors_with_updates`] uses the [`FutureSpawner`] + /// to parallelize deserialization as well as the IO operations. + /// + /// Because [`FutureSpawner`] requires that the spawned future be `'static` (matching `tokio` + /// and other multi-threaded runtime requirements), this method requires that `self` be an + /// `Arc` that can live for `'static` and be sent and accessed across threads. 
+ pub async fn read_all_channel_monitors_with_updates_parallel( + self: &Arc, + ) -> Result< + Vec<(BestBlock, ChannelMonitor<::EcdsaSigner>)>, + io::Error, + > where + K: MaybeSend + MaybeSync + 'static, + L: MaybeSend + MaybeSync + 'static, + ES: MaybeSend + MaybeSync + 'static, + SP: MaybeSend + MaybeSync + 'static, + BI: MaybeSend + MaybeSync + 'static, + FE: MaybeSend + MaybeSync + 'static, + ::EcdsaSigner: MaybeSend, + { + let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; + let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; + let monitor_list = self.0.kv_store.list(primary, secondary).await?; + let mut futures = Vec::with_capacity(monitor_list.len()); + for monitor_key in monitor_list { + let us = Arc::clone(&self); + futures.push(ResultFuture::Pending(self.0.future_spawner.spawn(async move { + us.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await + }))); + } + let future_results = MultiResultFuturePoller::new(futures).await; + let mut res = Vec::with_capacity(future_results.len()); + for result in future_results { + match result { + Err(_) => return Err(io::Error::new(io::ErrorKind::Other, "Future was cancelled")), + Ok(Err(e)) => return Err(e), + Ok(Ok(Some(read_res))) => res.push(read_res), + Ok(Ok(None)) => {}, + } + } + Ok(res) + } + + /// Read a single channel monitor, along with any stored updates for it. /// /// For `monitor_key`, channel storage keys can be the channel's funding [`OutPoint`], with an /// underscore `_` between txid and index for v1 channels. For example, given: @@ -901,7 +945,7 @@ where /// function to accomplish this. Take care to limit the number of parallel readers. 
pub async fn read_channel_monitor_with_updates( &self, monitor_key: &str, - ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> + ) -> Result<(BestBlock, ChannelMonitor<::EcdsaSigner>), io::Error> { self.0.read_channel_monitor_with_updates(monitor_key).await } @@ -946,7 +990,7 @@ where let future = inner.persist_new_channel(monitor_name, monitor); let channel_id = monitor.channel_id(); let completion = (monitor.channel_id(), monitor.get_latest_update_id()); - self.0.future_spawner.spawn(async move { + let _runs_free = self.0.future_spawner.spawn(async move { match future.await { Ok(()) => { inner.async_completed_updates.lock().unwrap().push(completion); @@ -978,7 +1022,7 @@ where None }; let inner = Arc::clone(&self.0); - self.0.future_spawner.spawn(async move { + let _runs_free = self.0.future_spawner.spawn(async move { match future.await { Ok(()) => if let Some(completion) = completion { inner.async_completed_updates.lock().unwrap().push(completion); @@ -996,7 +1040,7 @@ where pub(crate) fn spawn_async_archive_persisted_channel(&self, monitor_name: MonitorName) { let inner = Arc::clone(&self.0); - self.0.future_spawner.spawn(async move { + let _runs_free = self.0.future_spawner.spawn(async move { inner.archive_persisted_channel(monitor_name).await; }); } @@ -1006,6 +1050,9 @@ where } } +trait MaybeSendableFuture: Future> + MaybeSend {} +impl> + MaybeSend> MaybeSendableFuture for F {} + impl MonitorUpdatingPersisterAsyncInner where @@ -1018,7 +1065,7 @@ where { pub async fn read_channel_monitor_with_updates( &self, monitor_key: &str, - ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> + ) -> Result<(BestBlock, ChannelMonitor<::EcdsaSigner>), io::Error> { match self.maybe_read_channel_monitor_with_updates(monitor_key).await? 
{ Some(res) => Ok(res), @@ -1037,32 +1084,33 @@ where async fn maybe_read_channel_monitor_with_updates( &self, monitor_key: &str, ) -> Result< - Option<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, + Option<(BestBlock, ChannelMonitor<::EcdsaSigner>)>, io::Error, > { let monitor_name = MonitorName::from_str(monitor_key)?; - let read_res = self.maybe_read_monitor(&monitor_name, monitor_key).await?; - let (block_hash, monitor) = match read_res { + let read_future = pin!(self.maybe_read_monitor(&monitor_name, monitor_key)); + let list_future = pin!(self + .kv_store + .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_key)); + let (read_res, list_res) = TwoFutureJoiner::new(read_future, list_future).await; + let (best_block, monitor) = match read_res? { Some(res) => res, None => return Ok(None), }; - let mut current_update_id = monitor.get_latest_update_id(); - // TODO: Parallelize this loop by speculatively reading a batch of updates - loop { - current_update_id = match current_update_id.checked_add(1) { - Some(next_update_id) => next_update_id, - None => break, - }; - let update_name = UpdateName::from(current_update_id); - let update = match self.read_monitor_update(monitor_key, &update_name).await { - Ok(update) => update, - Err(err) if err.kind() == io::ErrorKind::NotFound => { - // We can't find any more updates, so we are done. 
- break; - }, - Err(err) => return Err(err), - }; - + let current_update_id = monitor.get_latest_update_id(); + let updates: Result, _> = + list_res?.into_iter().map(|name| UpdateName::new(name)).collect(); + let mut updates = updates?; + updates.sort_unstable(); + let updates_to_load = updates.iter().filter(|update| update.0 > current_update_id); + let mut update_futures = Vec::with_capacity(updates_to_load.clone().count()); + for update_name in updates_to_load { + update_futures.push(ResultFuture::Pending(Box::pin(async move { + (update_name, self.read_monitor_update(monitor_key, update_name).await) + }))); + } + for (update_name, update_res) in MultiResultFuturePoller::new(update_futures).await { + let update = update_res?; monitor .update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger) .map_err(|e| { @@ -1076,14 +1124,14 @@ where io::Error::new(io::ErrorKind::Other, "Monitor update failed") })?; } - Ok(Some((block_hash, monitor))) + Ok(Some((best_block, monitor))) } /// Read a channel monitor. 
async fn maybe_read_monitor( &self, monitor_name: &MonitorName, monitor_key: &str, ) -> Result< - Option<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, + Option<(BestBlock, ChannelMonitor<::EcdsaSigner>)>, io::Error, > { let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; @@ -1094,12 +1142,12 @@ where if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) { monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64); } - match ::EcdsaSigner>)>>::read( + match ::EcdsaSigner>)>>::read( &mut monitor_cursor, (&*self.entropy_source, &*self.signer_provider), ) { Ok(None) => Ok(None), - Ok(Some((blockhash, channel_monitor))) => { + Ok(Some((best_block, channel_monitor))) => { if channel_monitor.persistence_key() != *monitor_name { log_error!( self.logger, @@ -1111,7 +1159,7 @@ where "ChannelMonitor was stored under the wrong key", )) } else { - Ok(Some((blockhash, channel_monitor))) + Ok(Some((best_block, channel_monitor))) } }, Err(e) => { @@ -1179,9 +1227,9 @@ where Ok(()) } - fn persist_new_channel( - &self, monitor_name: MonitorName, monitor: &ChannelMonitor, - ) -> impl Future> { + fn persist_new_channel<'a, ChannelSigner: EcdsaChannelSigner>( + &'a self, monitor_name: MonitorName, monitor: &'a ChannelMonitor, + ) -> Pin> + 'static>> { // Determine the proper key for this monitor let monitor_key = monitor_name.to_string(); // Serialize and write the new monitor @@ -1200,7 +1248,10 @@ where // completion of the write. This ensures monitor persistence ordering is preserved. let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; - self.kv_store.write(primary, secondary, monitor_key.as_str(), monitor_bytes) + // There's no real reason why this needs to be boxed, but dropping it rams into the "hidden + // type for impl... captures lifetime that does not appear in bounds" issue. 
This can + // trivially be dropped once we upgrade to edition 2024/MSRV 1.85. + Box::pin(self.kv_store.write(primary, secondary, monitor_key.as_str(), monitor_bytes)) } fn update_persisted_channel<'a, ChannelSigner: EcdsaChannelSigner + 'a>( @@ -1226,12 +1277,10 @@ where // write method, allowing it to do its queueing immediately, and then return a // future for the completion of the write. This ensures monitor persistence // ordering is preserved. - res_a = Some(self.kv_store.write( - primary, - &monitor_key, - update_name.as_str(), - update.encode(), - )); + let encoded = update.encode(); + res_a = Some(async move { + self.kv_store.write(primary, &monitor_key, update_name.as_str(), encoded).await + }); } else { // We could write this update, but it meets criteria of our design that calls for a full monitor write. // Note that this is NOT an async function, but rather calls the *sync* KVStore @@ -1289,7 +1338,7 @@ where async fn archive_persisted_channel(&self, monitor_name: MonitorName) { let monitor_key = monitor_name.to_string(); let monitor = match self.read_channel_monitor_with_updates(&monitor_key).await { - Ok((_block_hash, monitor)) => monitor, + Ok((_best_block, monitor)) => monitor, Err(_) => return, }; let primary = ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; @@ -1448,7 +1497,7 @@ impl core::fmt::Display for MonitorName { /// let monitor_name = "some_monitor_name"; /// let storage_key = format!("channel_monitor_updates/{}/{}", monitor_name, update_name.as_str()); /// ``` -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct UpdateName(pub u64, String); impl UpdateName { diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index f821aa5afc0..2bd8a9eb879 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -1448,6 +1448,33 @@ impl Readable for BlockHash { } } +impl Writeable for [Option; 12] { + fn write(&self, w: &mut W) -> Result<(), io::Error> { + for hash_opt in self { + match 
hash_opt { + Some(hash) => hash.write(w)?, + None => ([0u8; 32]).write(w)?, + } + } + Ok(()) + } +} + +impl Readable for [Option; 12] { + fn read(r: &mut R) -> Result { + use bitcoin::hashes::Hash; + + let mut res = [None; 12]; + for hash_opt in res.iter_mut() { + let buf: [u8; 32] = Readable::read(r)?; + if buf != [0; 32] { + *hash_opt = Some(BlockHash::from_slice(&buf[..]).unwrap()); + } + } + Ok(res) + } +} + impl Writeable for ChainHash { fn write(&self, w: &mut W) -> Result<(), io::Error> { w.write_all(self.as_bytes()) diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index 5a1ffad3e04..bf048efdae1 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -35,10 +35,11 @@ use bitcoin::{BlockHash, ScriptBuf, Transaction, Txid}; use core::future::Future; use core::ops::Deref; +use core::pin::{pin, Pin}; use core::sync::atomic::{AtomicBool, Ordering}; use core::task; -use super::async_poll::{dummy_waker, AsyncResult}; +use super::async_poll::dummy_waker; /// The number of blocks we wait before we prune the tracked spendable outputs. 
pub const PRUNE_DELAY_BLOCKS: u32 = ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY; @@ -609,15 +610,32 @@ where sweeper_state.dirty = true; } - fn persist_state<'a>(&self, sweeper_state: &SweeperState) -> AsyncResult<'a, (), io::Error> { + #[cfg(feature = "std")] + fn persist_state<'a>( + &'a self, sweeper_state: &SweeperState, + ) -> Pin> + Send + 'static>> { let encoded = sweeper_state.encode(); - self.kv_store.write( + Box::pin(self.kv_store.write( OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_KEY, encoded, - ) + )) + } + + #[cfg(not(feature = "std"))] + fn persist_state<'a>( + &'a self, sweeper_state: &SweeperState, + ) -> Pin> + 'static>> { + let encoded = sweeper_state.encode(); + + Box::pin(self.kv_store.write( + OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, + OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, + OUTPUT_SWEEPER_PERSISTENCE_KEY, + encoded, + )) } /// Updates the sweeper state by executing the given callback. Persists the state afterwards if it is marked dirty, @@ -970,7 +988,7 @@ where &self, output_descriptors: Vec, channel_id: Option, exclude_static_outputs: bool, delay_until_height: Option, ) -> Result<(), ()> { - let mut fut = Box::pin(self.sweeper.track_spendable_outputs( + let mut fut = pin!(self.sweeper.track_spendable_outputs( output_descriptors, channel_id, exclude_static_outputs, @@ -1005,7 +1023,7 @@ where /// /// Wraps [`OutputSweeper::regenerate_and_broadcast_spend_if_necessary`]. 
pub fn regenerate_and_broadcast_spend_if_necessary(&self) -> Result<(), ()> { - let mut fut = Box::pin(self.sweeper.regenerate_and_broadcast_spend_if_necessary()); + let mut fut = pin!(self.sweeper.regenerate_and_broadcast_spend_if_necessary()); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); match fut.as_mut().poll(&mut ctx) { diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index ad8ea224205..8e66b8b0dfb 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -20,6 +20,7 @@ use crate::chain::channelmonitor::{ ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, MonitorEvent, }; use crate::chain::transaction::OutPoint; +use crate::chain::BestBlock; use crate::chain::WatchedOutput; use crate::events::bump_transaction::sync::WalletSourceSync; use crate::events::bump_transaction::Utxo; @@ -50,7 +51,6 @@ use crate::sign::{self, ReceiveAuthKey}; use crate::sign::{ChannelSigner, PeerStorageKey}; use crate::sync::RwLock; use crate::types::features::{ChannelFeatures, InitFeatures, NodeFeatures}; -use crate::util::async_poll::AsyncResult; use crate::util::config::UserConfig; use crate::util::dyn_signer::{ DynKeysInterface, DynKeysInterfaceTrait, DynPhantomKeysInterface, DynSigner, @@ -58,6 +58,7 @@ use crate::util::dyn_signer::{ use crate::util::logger::{Logger, Record}; #[cfg(feature = "std")] use crate::util::mut_global::MutGlobal; +use crate::util::native_async::MaybeSend; use crate::util::persist::{KVStore, KVStoreSync, MonitorName}; use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer}; use crate::util::test_channel_signer::{EnforcementState, TestChannelSigner}; @@ -66,7 +67,7 @@ use bitcoin::amount::Amount; use bitcoin::block::Block; use bitcoin::constants::genesis_block; use bitcoin::constants::ChainHash; -use bitcoin::hash_types::{BlockHash, Txid}; +use bitcoin::hash_types::Txid; use bitcoin::hashes::Hash; use bitcoin::network::Network; use 
bitcoin::script::{Builder, Script, ScriptBuf}; @@ -533,7 +534,7 @@ impl<'a> TestChainMonitor<'a> { // underlying `ChainMonitor`. let mut w = TestVecWriter(Vec::new()); monitor.write(&mut w).unwrap(); - let new_monitor = <(BlockHash, ChannelMonitor)>::read( + let new_monitor = <(BestBlock, ChannelMonitor)>::read( &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager), ) @@ -565,7 +566,7 @@ impl<'a> chain::Watch for TestChainMonitor<'a> { // monitor to a serialized copy and get he same one back. let mut w = TestVecWriter(Vec::new()); monitor.write(&mut w).unwrap(); - let new_monitor = <(BlockHash, ChannelMonitor)>::read( + let new_monitor = <(BestBlock, ChannelMonitor)>::read( &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager), ) @@ -621,7 +622,7 @@ impl<'a> chain::Watch for TestChainMonitor<'a> { let monitor = self.chain_monitor.get_monitor(channel_id).unwrap(); w.0.clear(); monitor.write(&mut w).unwrap(); - let new_monitor = <(BlockHash, ChannelMonitor)>::read( + let new_monitor = <(BestBlock, ChannelMonitor)>::read( &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager), ) @@ -1012,13 +1013,13 @@ impl TestStore { impl KVStore for TestStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.read_internal(&primary_namespace, &secondary_namespace, &key); - Box::pin(async move { res }) + async move { res } } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let path = format!("{primary_namespace}/{secondary_namespace}/{key}"); let future = Arc::new(Mutex::new((None, None))); @@ -1027,19 +1028,19 @@ impl KVStore for TestStore { let new_id = pending_writes.last().map(|(id, _, _)| id + 1).unwrap_or(0); pending_writes.push((new_id, Arc::clone(&future), buf)); - 
Box::pin(OneShotChannel(future)) + OneShotChannel(future) } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let res = self.remove_internal(&primary_namespace, &secondary_namespace, &key, lazy); - Box::pin(async move { res }) + async move { res } } fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.list_internal(primary_namespace, secondary_namespace); - Box::pin(async move { res }) + async move { res } } }