-
Notifications
You must be signed in to change notification settings - Fork 1.5k
Chain Selection Subsystem Logic #3277
Changes from all commits
66bf4b2
dfb09e5
5a474eb
9c2340b
3c49a32
06e92a6
a2b0250
8790e52
d0f0f45
60196bf
a023348
537e59b
22705aa
c71d5b2
422afc4
70e5d8a
a792b1c
3487cf3
4a1f610
ae74536
b678828
645327a
2a40721
f76d410
2602e50
efc0963
5ec0064
750b8d7
bb1de40
8f3c533
ec48118
82c1817
6f2edf3
a1d9169
a969a2b
6ed7ac4
0577ed6
c10d52c
5407dae
53e4539
f975bf2
2518a17
32d73a1
ca45cc3
d08f418
8a97b71
05dc72a
f95d23e
5c2d2d9
fc13846
fabc28d
46d2416
14e8518
c4457ae
6308490
45a07c8
f439ed7
d163da5
c838388
b8c12ee
a05cb2b
1711f11
0586dbc
d73d621
1185b75
2dd5358
f42230c
99b4392
5afd66a
bf15a67
1aabcb3
8dff999
16c26e8
2b075b1
5a7abf9
a512985
cb2d136
4e01714
9a16ffc
31f47de
eeb4c89
085b612
4132129
16ceda8
0465903
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
[package]
name = "polkadot-node-core-chain-selection"
description = "Chain Selection Subsystem"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
edition = "2018"

[dependencies]
futures = "0.3.15"
tracing = "0.1.26"
polkadot-primitives = { path = "../../../primitives" }
polkadot-node-primitives = { path = "../../primitives" }
polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" }
polkadot-node-subsystem-util = { path = "../../subsystem-util" }
kvdb = "0.9.0"
thiserror = "1.0.23"
parity-scale-codec = "2"

[dev-dependencies]
polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" }
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
parking_lot = "0.11"
assert_matches = "1"
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,235 @@ | ||
| // Copyright 2021 Parity Technologies (UK) Ltd. | ||
| // This file is part of Polkadot. | ||
|
|
||
| // Polkadot is free software: you can redistribute it and/or modify | ||
| // it under the terms of the GNU General Public License as published by | ||
| // the Free Software Foundation, either version 3 of the License, or | ||
| // (at your option) any later version. | ||
|
|
||
| // Polkadot is distributed in the hope that it will be useful, | ||
| // but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| // GNU General Public License for more details. | ||
|
|
||
| // You should have received a copy of the GNU General Public License | ||
| // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. | ||
|
|
||
| //! An abstraction over storage used by the chain selection subsystem. | ||
| //! | ||
| //! This provides both a [`Backend`] trait and an [`OverlayedBackend`] | ||
| //! struct which allows in-memory changes to be applied on top of a | ||
| //! [`Backend`], maintaining consistency between queries and temporary writes, | ||
| //! before any commit to the underlying storage is made. | ||
|
|
||
| use polkadot_primitives::v1::{BlockNumber, Hash}; | ||
|
|
||
| use std::collections::HashMap; | ||
|
|
||
| use crate::{Error, LeafEntrySet, BlockEntry, Timestamp}; | ||
|
|
||
| pub(super) enum BackendWriteOp { | ||
| WriteBlockEntry(BlockEntry), | ||
| WriteBlocksByNumber(BlockNumber, Vec<Hash>), | ||
| WriteViableLeaves(LeafEntrySet), | ||
| WriteStagnantAt(Timestamp, Vec<Hash>), | ||
| DeleteBlocksByNumber(BlockNumber), | ||
| DeleteBlockEntry(Hash), | ||
| DeleteStagnantAt(Timestamp), | ||
| } | ||
|
|
||
| /// An abstraction over backend storage for the logic of this subsystem. | ||
| pub(super) trait Backend { | ||
| /// Load a block entry from the DB. | ||
| fn load_block_entry(&self, hash: &Hash) -> Result<Option<BlockEntry>, Error>; | ||
| /// Load the active-leaves set. | ||
| fn load_leaves(&self) -> Result<LeafEntrySet, Error>; | ||
| /// Load the stagnant list at the given timestamp. | ||
| fn load_stagnant_at(&self, timestamp: Timestamp) -> Result<Vec<Hash>, Error>; | ||
| /// Load all stagnant lists up to and including the given unix timestamp | ||
| /// in ascending order. | ||
| fn load_stagnant_at_up_to(&self, up_to: Timestamp) | ||
| -> Result<Vec<(Timestamp, Vec<Hash>)>, Error>; | ||
| /// Load the earliest kept block number. | ||
| fn load_first_block_number(&self) -> Result<Option<BlockNumber>, Error>; | ||
| /// Load blocks by number. | ||
| fn load_blocks_by_number(&self, number: BlockNumber) -> Result<Vec<Hash>, Error>; | ||
|
|
||
| /// Atomically write the list of operations, with later operations taking precedence over prior. | ||
| fn write<I>(&mut self, ops: I) -> Result<(), Error> | ||
| where I: IntoIterator<Item = BackendWriteOp>; | ||
| } | ||
|
|
||
| /// An in-memory overlay over the backend. | ||
| /// | ||
| /// This maintains read-only access to the underlying backend, but can be | ||
| /// converted into a set of write operations which will, when written to | ||
| /// the underlying backend, give the same view as the state of the overlay. | ||
| pub(super) struct OverlayedBackend<'a, B: 'a> { | ||
| inner: &'a B, | ||
|
|
||
| // `None` means 'deleted', missing means query inner. | ||
| block_entries: HashMap<Hash, Option<BlockEntry>>, | ||
| // `None` means 'deleted', missing means query inner. | ||
| blocks_by_number: HashMap<BlockNumber, Option<Vec<Hash>>>, | ||
| // 'None' means 'deleted', missing means query inner. | ||
| stagnant_at: HashMap<Timestamp, Option<Vec<Hash>>>, | ||
| // 'None' means query inner. | ||
| leaves: Option<LeafEntrySet>, | ||
| } | ||
|
|
||
| impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { | ||
| pub(super) fn new(backend: &'a B) -> Self { | ||
| OverlayedBackend { | ||
| inner: backend, | ||
| block_entries: HashMap::new(), | ||
| blocks_by_number: HashMap::new(), | ||
| stagnant_at: HashMap::new(), | ||
| leaves: None, | ||
| } | ||
| } | ||
|
|
||
| pub(super) fn load_block_entry(&self, hash: &Hash) -> Result<Option<BlockEntry>, Error> { | ||
| if let Some(val) = self.block_entries.get(&hash) { | ||
| return Ok(val.clone()) | ||
| } | ||
|
|
||
| self.inner.load_block_entry(hash) | ||
| } | ||
|
|
||
| pub(super) fn load_blocks_by_number(&self, number: BlockNumber) -> Result<Vec<Hash>, Error> { | ||
| if let Some(val) = self.blocks_by_number.get(&number) { | ||
| return Ok(val.as_ref().map_or(Vec::new(), Clone::clone)); | ||
| } | ||
|
|
||
| self.inner.load_blocks_by_number(number) | ||
| } | ||
|
|
||
| pub(super) fn load_leaves(&self) -> Result<LeafEntrySet, Error> { | ||
| if let Some(ref set) = self.leaves { | ||
| return Ok(set.clone()) | ||
| } | ||
|
|
||
| self.inner.load_leaves() | ||
| } | ||
|
|
||
| pub(super) fn load_stagnant_at(&self, timestamp: Timestamp) -> Result<Vec<Hash>, Error> { | ||
| if let Some(val) = self.stagnant_at.get(×tamp) { | ||
| return Ok(val.as_ref().map_or(Vec::new(), Clone::clone)); | ||
| } | ||
|
|
||
| self.inner.load_stagnant_at(timestamp) | ||
| } | ||
|
|
||
| pub(super) fn write_block_entry(&mut self, entry: BlockEntry) { | ||
| self.block_entries.insert(entry.block_hash, Some(entry)); | ||
| } | ||
|
|
||
| pub(super) fn delete_block_entry(&mut self, hash: &Hash) { | ||
| self.block_entries.insert(*hash, None); | ||
| } | ||
|
|
||
| pub(super) fn write_blocks_by_number(&mut self, number: BlockNumber, blocks: Vec<Hash>) { | ||
| if blocks.is_empty() { | ||
| self.blocks_by_number.insert(number, None); | ||
| } else { | ||
| self.blocks_by_number.insert(number, Some(blocks)); | ||
| } | ||
| } | ||
|
|
||
| pub(super) fn delete_blocks_by_number(&mut self, number: BlockNumber) { | ||
| self.blocks_by_number.insert(number, None); | ||
| } | ||
|
|
||
| pub(super) fn write_leaves(&mut self, leaves: LeafEntrySet) { | ||
| self.leaves = Some(leaves); | ||
| } | ||
|
|
||
| pub(super) fn write_stagnant_at(&mut self, timestamp: Timestamp, hashes: Vec<Hash>) { | ||
| self.stagnant_at.insert(timestamp, Some(hashes)); | ||
| } | ||
|
|
||
| pub(super) fn delete_stagnant_at(&mut self, timestamp: Timestamp) { | ||
| self.stagnant_at.insert(timestamp, None); | ||
| } | ||
|
|
||
| /// Transform this backend into a set of write-ops to be written to the | ||
| /// inner backend. | ||
| pub(super) fn into_write_ops(self) -> impl Iterator<Item = BackendWriteOp> { | ||
| let block_entry_ops = self.block_entries.into_iter().map(|(h, v)| match v { | ||
| Some(v) => BackendWriteOp::WriteBlockEntry(v), | ||
| None => BackendWriteOp::DeleteBlockEntry(h), | ||
| }); | ||
|
|
||
| let blocks_by_number_ops = self.blocks_by_number.into_iter().map(|(n, v)| match v { | ||
| Some(v) => BackendWriteOp::WriteBlocksByNumber(n, v), | ||
| None => BackendWriteOp::DeleteBlocksByNumber(n), | ||
| }); | ||
|
|
||
| let leaf_ops = self.leaves.into_iter().map(BackendWriteOp::WriteViableLeaves); | ||
|
|
||
| let stagnant_at_ops = self.stagnant_at.into_iter().map(|(n, v)| match v { | ||
| Some(v) => BackendWriteOp::WriteStagnantAt(n, v), | ||
| None => BackendWriteOp::DeleteStagnantAt(n), | ||
| }); | ||
|
|
||
| block_entry_ops | ||
| .chain(blocks_by_number_ops) | ||
| .chain(leaf_ops) | ||
| .chain(stagnant_at_ops) | ||
| } | ||
| } | ||
|
|
||
| /// Attempt to find the given ancestor in the chain with given head. | ||
| /// | ||
| /// If the ancestor is the most recently finalized block, and the `head` is | ||
| /// a known unfinalized block, this will return `true`. | ||
| /// | ||
| /// If the ancestor is an unfinalized block and `head` is known, this will | ||
| /// return true if `ancestor` is in `head`'s chain. | ||
| /// | ||
| /// If the ancestor is an older finalized block, this will return `false`. | ||
| fn contains_ancestor( | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This could be part of the
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yes, I can see that, but I believe it's simpler right now to expose a minimal
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This function is \Omega(block_height) in the worst case, so if someone were to pass in the genesis hash we would basically read the entire chain, right? I wonder whether it makes sense to put a limit on the depth of this loop...
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. @Lldenaurois It's actually O(block_height - finalized_height) because we only store unfinalized subtrees here. Pretty much all the algorithms here have the same complexity but as long as we don't have thousands of unfinalized blocks they'll work fine.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Right now it's more inefficient than it needs to be but we could easily keep a |
||
| backend: &impl Backend, | ||
| head: Hash, | ||
| ancestor: Hash, | ||
| ) -> Result<bool, Error> { | ||
| let mut current_hash = head; | ||
| loop { | ||
| if current_hash == ancestor { return Ok(true) } | ||
| match backend.load_block_entry(¤t_hash)? { | ||
| Some(e) => { current_hash = e.parent_hash } | ||
| None => break | ||
| } | ||
| } | ||
|
|
||
| Ok(false) | ||
| } | ||
ordian marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
|
||
| /// This returns the best unfinalized leaf containing the required block. | ||
| /// | ||
| /// If the required block is finalized but not the most recent finalized block, | ||
| /// this will return `None`. | ||
| /// | ||
| /// If the required block is unfinalized but not an ancestor of any viable leaf, | ||
| /// this will return `None`. | ||
| // | ||
| // Note: this is O(N^2) in the depth of `required` and the number of leaves. | ||
| // We expect the number of unfinalized blocks to be small, as in, to not exceed | ||
| // single digits in practice, and exceedingly unlikely to surpass 1000. | ||
| // | ||
| // However, if we need to, we could implement some type of skip-list for | ||
| // fast ancestry checks. | ||
| pub(super) fn find_best_leaf_containing( | ||
| backend: &impl Backend, | ||
| required: Hash, | ||
| ) -> Result<Option<Hash>, Error> { | ||
| let leaves = backend.load_leaves()?; | ||
| for leaf in leaves.into_hashes_descending() { | ||
| if contains_ancestor(backend, leaf, required)? { | ||
| return Ok(Some(leaf)) | ||
| } | ||
| } | ||
ordian marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
|
||
| // If there are no viable leaves containing the ancestor | ||
| Ok(None) | ||
| } | ||
Uh oh!
There was an error while loading. Please reload this page.