This repository was archived by the owner on Nov 15, 2023. It is now read-only.
Merged
2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

5 changes: 5 additions & 0 deletions core/client/db/Cargo.toml
@@ -10,6 +10,7 @@ log = "0.4"
kvdb = { git = "https://github.com/paritytech/parity-common", rev="b0317f649ab2c665b7987b8475878fc4d2e1f81d" }
# FIXME replace with release as soon as our rocksdb changes are released upstream https://github.com/paritytech/parity-common/issues/88
kvdb-rocksdb = { git = "https://github.com/paritytech/parity-common", rev="b0317f649ab2c665b7987b8475878fc4d2e1f81d" }
kvdb-memorydb = { git = "https://github.com/paritytech/parity-common", rev="b0317f649ab2c665b7987b8475878fc4d2e1f81d", optional = true }
lru-cache = "0.1.1"
hash-db = { version = "0.11" }
primitives = { package = "substrate-primitives", path = "../../primitives" }
@@ -27,3 +28,7 @@ kvdb-memorydb = { git = "https://github.com/paritytech/parity-common", rev="b031
substrate-keyring = { path = "../../keyring" }
test-client = { package = "substrate-test-client", path = "../../test-client" }
env_logger = { version = "0.6" }

[features]
default = []
test-helpers = ["kvdb-memorydb"]
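The optional `kvdb-memorydb` dependency and the new `test-helpers` feature exist so in-memory test helpers can be compiled on demand. A minimal, self-contained sketch (hypothetical module and function names, not this crate's API) of the cfg-gating pattern the feature enables in the source changes below:

#[cfg(any(test, feature = "test-helpers"))]
pub mod test_helpers {
    use std::collections::HashMap;

    // Stand-in for helpers that must never ship in production builds: they are
    // compiled only for the crate's own tests or when a dependent crate opts in.
    pub fn new_in_memory_db() -> HashMap<Vec<u8>, Vec<u8>> {
        HashMap::new()
    }
}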
65 changes: 59 additions & 6 deletions core/client/db/src/lib.rs
Expand Up @@ -57,6 +57,9 @@ use crate::storage_cache::{CachingState, SharedCache, new_shared_cache};
use log::{trace, debug, warn};
pub use state_db::PruningMode;

#[cfg(feature = "test-helpers")]
use client::in_mem::Backend as InMemoryBackend;

const CANONICALIZATION_DELAY: u64 = 4096;
const MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR: u64 = 32768;
const STATE_CACHE_SIZE_BYTES: usize = 16 * 1024 * 1024;
@@ -463,8 +466,10 @@ impl<Block: BlockT> client::backend::PrunableStateChangesTrieStorage<Blake2Hashe

impl<Block: BlockT> state_machine::ChangesTrieRootsStorage<Blake2Hasher> for DbChangesTrieStorage<Block> {
fn root(&self, anchor: &state_machine::ChangesTrieAnchorBlockId<H256>, block: u64) -> Result<Option<H256>, String> {
// check API requirement
assert!(block <= anchor.number, "API requirement");
// check API requirement: we can't get NEXT block(s) based on anchor
if block > anchor.number {
return Err(format!("Can't get changes trie root at {} using anchor at {}", block, anchor.number));
}

// we need to get hash of the block to resolve changes trie root
let block_id = if block <= self.meta.read().finalized_number.as_() {
@@ -531,8 +536,8 @@ impl<Block: BlockT<Hash=H256>> Backend<Block> {
Backend::from_kvdb(db as Arc<_>, config.pruning, canonicalization_delay)
}

#[cfg(test)]
fn new_test(keep_blocks: u32, canonicalization_delay: u64) -> Self {
#[cfg(any(test, feature = "test-helpers"))]
pub fn new_test(keep_blocks: u32, canonicalization_delay: u64) -> Self {
use utils::NUM_COLUMNS;

let db = Arc::new(::kvdb_memorydb::create(NUM_COLUMNS));
@@ -570,6 +575,54 @@ impl<Block: BlockT<Hash=H256>> Backend<Block> {
})
}

/// Returns an in-memory backend that contains the same set of blocks as `self`.
#[cfg(feature = "test-helpers")]
pub fn as_in_memory(&self) -> InMemoryBackend<Block, Blake2Hasher> {
use client::backend::{Backend as ClientBackend, BlockImportOperation};
use client::blockchain::Backend as BlockchainBackend;

let inmem = InMemoryBackend::<Block, Blake2Hasher>::new();

// get all header hashes and sort them by number (numbers may repeat when there are forks)
let mut headers: Vec<(NumberFor<Block>, Block::Hash, Block::Header)> = Vec::new();
for (_, header) in self.blockchain.db.iter(columns::HEADER) {
let header = Block::Header::decode(&mut &header[..]).unwrap();
let hash = header.hash();
let number = *header.number();
let pos = headers.binary_search_by(|item| item.0.cmp(&number));
match pos {
Ok(pos) => headers.insert(pos, (number, hash, header)),
Err(pos) => headers.insert(pos, (number, hash, header)),
}
}

// insert all headers + bodies + justifications
let info = self.blockchain.info().unwrap();
for (number, hash, header) in headers {
let id = BlockId::Hash(hash);
let justification = self.blockchain.justification(id).unwrap();
let body = self.blockchain.body(id).unwrap();
let state = self.state_at(id).unwrap().pairs();

let new_block_state = if number.is_zero() {
NewBlockState::Final
} else if hash == info.best_hash {
NewBlockState::Best
} else {
NewBlockState::Normal
};
let mut op = inmem.begin_operation().unwrap();
op.set_block_data(header, body, justification, new_block_state).unwrap();
op.update_db_storage(state.into_iter().map(|(k, v)| (None, k, Some(v))).collect()).unwrap();
inmem.commit_operation(op).unwrap();
}

// and now finalize the best block we have
inmem.finalize_block(BlockId::Hash(info.finalized_hash), None).unwrap();

inmem
}

/// Handle setting head within a transaction. `route_to` should be the last
/// block that existed in the database. `best_to` should be the best block
/// to be set.
@@ -712,8 +765,8 @@ impl<Block: BlockT<Hash=H256>> Backend<Block> {
operation.apply_aux(&mut transaction);

let mut meta_updates = Vec::new();
let mut last_finalized_hash = self.blockchain.meta.read().finalized_hash;
if !operation.finalized_blocks.is_empty() {
let mut last_finalized_hash = self.blockchain.meta.read().finalized_hash;
for (block, justification) in operation.finalized_blocks {
let block_hash = self.blockchain.expect_block_hash_from_id(&block)?;
let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?;
@@ -787,7 +840,7 @@ impl<Block: BlockT<Hash=H256>> Backend<Block> {

if finalized {
// TODO: ensure best chain contains this block.
self.ensure_sequential_finalization(header, None)?;
self.ensure_sequential_finalization(header, Some(last_finalized_hash))?;
self.note_finalized(&mut transaction, header, hash)?;
} else {
// canonicalize blocks which are old enough, regardless of finality.
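Besides gating `new_test` and `as_in_memory` behind the `test-helpers` feature, the lib.rs changes above make `DbChangesTrieStorage::root` return an error instead of asserting when a block past the anchor is requested. A self-contained sketch of that pattern (hypothetical `Anchor` type, not the Substrate API):

struct Anchor { number: u64 }

fn changes_trie_root(anchor: &Anchor, block: u64) -> Result<Option<u64>, String> {
    // API requirement: the queried block may not be newer than the anchor.
    if block > anchor.number {
        return Err(format!("Can't get changes trie root at {} using anchor at {}", block, anchor.number));
    }
    // A real implementation would resolve the block hash and read the stored root here;
    // the sketch just echoes the block number as a placeholder.
    Ok(Some(block))
}

fn main() {
    assert!(changes_trie_root(&Anchor { number: 10 }, 11).is_err()); // recoverable, no panic
    assert!(changes_trie_root(&Anchor { number: 10 }, 7).is_ok());
}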
6 changes: 3 additions & 3 deletions core/client/db/src/light.rs
@@ -72,8 +72,8 @@ impl<Block> LightStorage<Block>
Self::from_kvdb(db as Arc<_>)
}

#[cfg(test)]
pub(crate) fn new_test() -> Self {
#[cfg(any(test, feature = "test-helpers"))]
pub fn new_test() -> Self {
use utils::NUM_COLUMNS;

let db = Arc::new(::kvdb_memorydb::create(NUM_COLUMNS));
@@ -493,7 +493,7 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
}

fn cache(&self) -> Option<&BlockchainCache<Block>> {
None
Some(&self.cache)
}
}

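The `cache()` change above makes the light storage hand out its authorities cache instead of always returning `None`. A self-contained sketch of the accessor pattern (hypothetical types, not the Substrate API):

struct AuthoritiesCache { authorities: Vec<[u8; 32]> }

struct LightStorage { cache: AuthoritiesCache }

impl LightStorage {
    fn cache(&self) -> Option<&AuthoritiesCache> {
        // Previously this returned `None`, hiding the cache from callers.
        Some(&self.cache)
    }
}

fn main() {
    let storage = LightStorage { cache: AuthoritiesCache { authorities: vec![[1u8; 32]] } };
    // Callers (e.g. authority lookups) can now consult the cache before going remote.
    assert!(storage.cache().map_or(false, |c| !c.authorities.is_empty()));
}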
17 changes: 7 additions & 10 deletions core/client/src/client.rs
Expand Up @@ -1628,7 +1628,7 @@ pub(crate) mod tests {
client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();

assert_eq!(client.info().unwrap().chain.best_number, 1);
assert!(client.state_at(&BlockId::Number(1)).unwrap() != client.state_at(&BlockId::Number(0)).unwrap());
assert!(client.state_at(&BlockId::Number(1)).unwrap().pairs() != client.state_at(&BlockId::Number(0)).unwrap().pairs());
assert_eq!(
client.runtime_api().balance_of(
&BlockId::Number(client.info().unwrap().chain.best_number),
@@ -1647,14 +1647,11 @@

#[test]
fn client_uses_authorities_from_blockchain_cache() {
let client = test_client::new();
test_client::client::in_mem::cache_authorities_at(
client.backend().blockchain(),
Default::default(),
Some(vec![[1u8; 32].into()]));
assert_eq!(client.authorities_at(
&BlockId::Hash(Default::default())).unwrap(),
vec![[1u8; 32].into()]);
let client = test_client::new_light();
let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash();
// the authorities cache is first filled at the genesis block,
// so it should be read from the cache here (a remote request would fail in this test)
assert!(!client.authorities_at(&BlockId::Hash(genesis_hash)).unwrap().is_empty());
}

#[test]
Expand All @@ -1680,7 +1677,7 @@ pub(crate) mod tests {
client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();

assert_eq!(client.info().unwrap().chain.best_number, 1);
assert!(client.state_at(&BlockId::Number(1)).unwrap() != client.state_at(&BlockId::Number(0)).unwrap());
assert!(client.state_at(&BlockId::Number(1)).unwrap().pairs() != client.state_at(&BlockId::Number(0)).unwrap().pairs());
assert_eq!(client.body(&BlockId::Number(1)).unwrap().unwrap().len(), 1)
}

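The test assertions above now compare the `pairs()` of two states rather than the state objects themselves, presumably because the db-backed state is not directly comparable; the data, not the handle, is what the test cares about. A self-contained sketch of the idea (hypothetical state representation):

type StatePairs = Vec<(Vec<u8>, Vec<u8>)>;

fn pairs(state: &StatePairs) -> StatePairs {
    // Sort so the comparison is independent of iteration order.
    let mut p = state.clone();
    p.sort();
    p
}

fn main() {
    let state_at_0: StatePairs = vec![(b"alice".to_vec(), vec![100])];
    let state_at_1: StatePairs = vec![(b"alice".to_vec(), vec![42])];
    // Block 1 changed Alice's balance, so the extracted key/value pairs differ.
    assert!(pairs(&state_at_0) != pairs(&state_at_1));
}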
30 changes: 17 additions & 13 deletions core/network/src/test/sync.rs
Expand Up @@ -31,7 +31,8 @@ fn sync_from_two_peers_works() {
net.peer(1).push_blocks(100, false);
net.peer(2).push_blocks(100, false);
net.sync();
assert!(net.peer(0).client.backend().blockchain().equals_to(net.peer(1).client.backend().blockchain()));
assert!(net.peer(0).client.backend().as_in_memory().blockchain()
.equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
let status = net.peer(0).status();
assert_eq!(status.sync.state, SyncState::Idle);
}
@@ -45,7 +46,8 @@ fn sync_from_two_peers_with_ancestry_search_works() {
net.peer(2).push_blocks(100, false);
net.restart_peer(0);
net.sync();
assert!(net.peer(0).client.backend().blockchain().canon_equals_to(net.peer(1).client.backend().blockchain()));
assert!(net.peer(0).client.backend().as_in_memory().blockchain()
.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
}

#[test]
@@ -58,7 +60,8 @@ fn sync_long_chain_works() {
net.sync();
// Wait for peers to get up to speed.
thread::sleep(time::Duration::from_millis(1000));
assert!(net.peer(0).client.backend().blockchain().equals_to(net.peer(1).client.backend().blockchain()));
assert!(net.peer(0).client.backend().as_in_memory().blockchain()
.equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
}

#[test]
@@ -68,7 +71,8 @@ fn sync_no_common_longer_chain_fails() {
net.peer(0).push_blocks(20, true);
net.peer(1).push_blocks(20, false);
net.sync();
assert!(!net.peer(0).client.backend().blockchain().canon_equals_to(net.peer(1).client.backend().blockchain()));
assert!(!net.peer(0).client.backend().as_in_memory().blockchain()
.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
}

#[test]
@@ -111,11 +115,11 @@ fn sync_after_fork_works() {
net.peer(2).push_blocks(1, false);

// peer 1 has the best chain
let peer1_chain = net.peer(1).client.backend().blockchain().clone();
let peer1_chain = net.peer(1).client.backend().as_in_memory().blockchain().clone();
net.sync();
assert!(net.peer(0).client.backend().blockchain().canon_equals_to(&peer1_chain));
assert!(net.peer(1).client.backend().blockchain().canon_equals_to(&peer1_chain));
assert!(net.peer(2).client.backend().blockchain().canon_equals_to(&peer1_chain));
assert!(net.peer(0).client.backend().as_in_memory().blockchain().canon_equals_to(&peer1_chain));
assert!(net.peer(1).client.backend().as_in_memory().blockchain().canon_equals_to(&peer1_chain));
assert!(net.peer(2).client.backend().as_in_memory().blockchain().canon_equals_to(&peer1_chain));
}

#[test]
Expand All @@ -131,8 +135,8 @@ fn syncs_all_forks() {

net.sync();
// Check that all peers have all of the blocks.
assert_eq!(9, net.peer(0).client.backend().blockchain().blocks_count());
assert_eq!(9, net.peer(1).client.backend().blockchain().blocks_count());
assert_eq!(9, net.peer(0).client.backend().as_in_memory().blockchain().blocks_count());
assert_eq!(9, net.peer(1).client.backend().as_in_memory().blockchain().blocks_count());
}

#[test]
@@ -147,9 +151,9 @@ fn own_blocks_are_announced() {
net.sync();
assert_eq!(net.peer(0).client.backend().blockchain().info().unwrap().best_number, 1);
assert_eq!(net.peer(1).client.backend().blockchain().info().unwrap().best_number, 1);
let peer0_chain = net.peer(0).client.backend().blockchain().clone();
assert!(net.peer(1).client.backend().blockchain().canon_equals_to(&peer0_chain));
assert!(net.peer(2).client.backend().blockchain().canon_equals_to(&peer0_chain));
let peer0_chain = net.peer(0).client.backend().as_in_memory().blockchain().clone();
assert!(net.peer(1).client.backend().as_in_memory().blockchain().canon_equals_to(&peer0_chain));
assert!(net.peer(2).client.backend().as_in_memory().blockchain().canon_equals_to(&peer0_chain));
}

#[test]
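These sync tests now convert each peer's disk-backed backend to an in-memory copy via `as_in_memory()` before comparing chains, presumably because the comparison helpers (`equals_to`, `canon_equals_to`, `blocks_count`) live on the in-memory blockchain. A self-contained model of the idea (hypothetical types, not the Substrate API):

use std::collections::BTreeMap;

struct DbBackend { blocks: Vec<(u64, String)> }

#[derive(PartialEq)]
struct InMemoryBackend { blocks: BTreeMap<u64, String> }

impl DbBackend {
    // Copy every block into an ordinary in-memory map so two chains can be
    // compared with plain equality, mirroring what `Backend::as_in_memory` enables.
    fn as_in_memory(&self) -> InMemoryBackend {
        InMemoryBackend { blocks: self.blocks.iter().cloned().collect() }
    }
}

fn main() {
    let peer0 = DbBackend { blocks: vec![(0, "genesis".into()), (1, "a".into())] };
    let peer1 = DbBackend { blocks: vec![(1, "a".into()), (0, "genesis".into())] };
    // Same chain, different insertion order: the in-memory copies still compare equal.
    assert!(peer0.as_in_memory() == peer1.as_in_memory());
}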
9 changes: 9 additions & 0 deletions core/state-machine/src/changes_trie/changes_iterator.rs
Expand Up @@ -40,6 +40,9 @@ pub fn key_changes<'a, S: Storage<H>, H: Hasher>(
max: u64,
key: &'a [u8],
) -> Result<DrilldownIterator<'a, S, S, H>, String> where H::Out: HeapSizeOf {
// we can't query any roots beyond the end (anchor) block, so clamp `max`
let max = ::std::cmp::min(max, end.number);

Ok(DrilldownIterator {
essence: DrilldownIteratorEssence {
key,
@@ -67,6 +70,9 @@ pub fn key_changes_proof<S: Storage<H>, H: Hasher>(
max: u64,
key: &[u8],
) -> Result<Vec<Vec<u8>>, String> where H::Out: HeapSizeOf {
// we can't query any roots beyond the end (anchor) block, so clamp `max`
let max = ::std::cmp::min(max, end.number);

let mut iter = ProvingDrilldownIterator {
essence: DrilldownIteratorEssence {
key,
@@ -104,6 +110,9 @@ pub fn key_changes_proof_check<S: RootsStorage<H>, H: Hasher>(
max: u64,
key: &[u8]
) -> Result<Vec<(u64, u32)>, String> where H::Out: HeapSizeOf {
// we can't query any roots beyond the end (anchor) block, so clamp `max`
let max = ::std::cmp::min(max, end.number);

let mut proof_db = MemoryDB::<H>::default();
for item in proof {
proof_db.insert(&item);
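All three query entry points above now clamp `max` to the anchor (`end.number`), so a request reaching past the anchor simply has its range cut off rather than touching roots that cannot exist yet. A tiny self-contained sketch of the clamp:

fn clamp_max(max: u64, end_number: u64) -> u64 {
    // Changes-trie roots only exist up to the anchor block, so never query past it.
    std::cmp::min(max, end_number)
}

fn main() {
    assert_eq!(clamp_max(150, 100), 100); // range past the anchor is cut off
    assert_eq!(clamp_max(80, 100), 80);   // ranges within the anchor are untouched
}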
2 changes: 2 additions & 0 deletions core/test-client/Cargo.toml
@@ -6,6 +6,8 @@ edition = "2018"

[dependencies]
client = { package = "substrate-client", path = "../client" }
client-db = { package = "substrate-client-db", path = "../client/db", features = ["test-helpers"] }
futures = { version = "0.1.17" }
parity-codec = "3.0"
executor = { package = "substrate-executor", path = "../executor" }
consensus = { package = "substrate-consensus-common", path = "../consensus/common" }